mirror of https://github.com/versity/scoutfs.git (synced 2025-12-23 05:25:18 +00:00)
Merge pull request #152 from versity/zab/stuck_srch_compact
Zab/stuck srch compact
@@ -1966,9 +1966,7 @@ static int server_srch_get_compact(struct super_block *sb,
 	ret = scoutfs_srch_get_compact(sb, &server->alloc, &server->wri,
 				       &super->srch_root, rid, sc);
 	mutex_unlock(&server->srch_mutex);
-	if (ret == 0 && sc->nr == 0)
-		ret = -ENOENT;
-	if (ret < 0)
+	if (ret < 0 || (ret == 0 && sc->nr == 0))
 		goto apply;
 
 	mutex_lock(&server->alloc_mutex);
kmod/src/srch.c (120 lines changed)
@@ -30,6 +30,7 @@
 #include "client.h"
 #include "counters.h"
 #include "scoutfs_trace.h"
+#include "triggers.h"
 
 /*
  * This srch subsystem gives us a way to find inodes that have a given
@@ -520,6 +521,95 @@ out:
 	return ret;
 }
 
+/*
+ * Padded entries are encoded in pairs after an existing entry.  All of
+ * the pairs cancel each other out by all readers (the second encoding
+ * looks like deletion) so they aren't visible to the first/last bounds of
+ * the block or file.
+ */
+static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
+			       struct scoutfs_srch_block *srb, struct scoutfs_srch_entry *sre)
+{
+	int ret;
+
+	ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
+			   sre, &srb->tail);
+	if (ret > 0) {
+		srb->tail = *sre;
+		le32_add_cpu(&srb->entry_nr, 1);
+		le32_add_cpu(&srb->entry_bytes, ret);
+		le64_add_cpu(&sfl->entries, 1);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * This is called by a testing trigger to create a very specific case of
+ * encoded entry offsets.  We want the last entry in the block to start
+ * precisely at the _SAFE_BYTES offset.
+ *
+ * This is called when there is a single existing entry in the block.
+ * We have the entire block to work with.  We encode pairs of matching
+ * entries.  This hides them from readers (both searches and merging) as
+ * they're interpreted as creation and deletion and are deleted.  We use
+ * the existing hash value of the first entry in the block but then set
+ * the inode to an impossibly large number so it doesn't interfere with
+ * anything.
+ *
+ * To hit the specific offset we very carefully manage the amount of
+ * bytes of change between fields in the entry.  We know that if we
+ * change all the byte of the ino and id we end up with a 20 byte
+ * (2+8+8,2) encoding of the pair of entries.  To have the last entry
+ * start at the _SAFE_POS offset we know that the final 20 byte pair
+ * encoding needs to end at 2 bytes (second entry encoding) after the
+ * _SAFE_POS offset.
+ *
+ * So as we encode pairs we watch the delta of our current offset from
+ * that desired final offset of 2 past _SAFE_POS.  If we're a multiple
+ * of 20 away then we encode the full 20 byte pairs.  If we're not, then
+ * we drop a byte to encode 19 bytes.  That'll slowly change the offset
+ * to be a multiple of 20 again while encoding large entries.
+ */
+static void pad_entries_at_safe(struct scoutfs_srch_file *sfl, u64 blk,
+				struct scoutfs_srch_block *srb)
+{
+	struct scoutfs_srch_entry sre;
+	u32 target;
+	s32 diff;
+	u64 hash;
+	u64 ino;
+	u64 id;
+	int ret;
+
+	hash = le64_to_cpu(srb->tail.hash);
+	ino = le64_to_cpu(srb->tail.ino) | (1ULL << 62);
+	id = le64_to_cpu(srb->tail.id);
+
+	target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;
+
+	while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
+		ino ^= 1ULL << (7 * 8);
+		if (diff % 20 == 0) {
+			id ^= 1ULL << (7 * 8);
+		} else {
+			id ^= 1ULL << (6 * 8);
+		}
+
+		sre.hash = cpu_to_le64(hash);
+		sre.ino = cpu_to_le64(ino);
+		sre.id = cpu_to_le64(id);
+
+		ret = append_padded_entry(sfl, blk, srb, &sre);
+		if (ret == 0)
+			ret = append_padded_entry(sfl, blk, srb, &sre);
+		BUG_ON(ret != 0);
+
+		diff = target - le32_to_cpu(srb->entry_bytes);
+	}
+}
+
 /*
  * The caller is dropping an ino/id because the tracking rbtree is full.
  * This loses information so we can't return any entries at or after the
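The arithmetic in the comment above pad_entries_at_safe() is easier to follow with concrete numbers. The sketch below is a hypothetical userspace walk-through only, not kernel code: the block size, starting entry_bytes, and output are made-up stand-ins, and it ignores which ino/id bytes get flipped. It just shows how mixing full 20-byte pairs with occasional 19-byte pairs walks entry_bytes to exactly _SAFE_BYTES + 2, so the last 2-byte encoding starts right at the safe offset.

#include <stdio.h>

int main(void)
{
	unsigned int safe_bytes = 60000;	/* stand-in for SCOUTFS_SRCH_BLOCK_SAFE_BYTES */
	unsigned int target = safe_bytes + 2;	/* final pair must end 2 bytes past the safe offset */
	unsigned int entry_bytes = 17;		/* assumed size of the one existing encoded entry */
	unsigned int pairs = 0, short_pairs = 0;

	while (entry_bytes < target) {
		unsigned int diff = target - entry_bytes;
		/* a multiple of 20 away: encode a full 20-byte pair; otherwise drop a byte to realign */
		unsigned int sz = (diff % 20 == 0) ? 20 : 19;

		entry_bytes += sz;
		pairs++;
		if (sz == 19)
			short_pairs++;
	}

	/* entry_bytes == target, so the last entry's 2-byte encoding starts at safe_bytes */
	printf("%u pairs (%u shortened) land entry_bytes at %u = safe + 2\n",
	       pairs, short_pairs, entry_bytes);
	return 0;
}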
@@ -987,6 +1077,9 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
 	struct scoutfs_key key;
 	int ret;
 
+	if (sfl->ref.blkno && !force && scoutfs_trigger(sb, SRCH_FORCE_LOG_ROTATE))
+		force = true;
+
 	if (sfl->ref.blkno == 0 ||
 	    (!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
 		return 0;
@@ -1462,7 +1555,7 @@ static int kway_merge(struct super_block *sb,
 		      struct scoutfs_block_writer *wri,
 		      struct scoutfs_srch_file *sfl,
 		      kway_get_t kway_get, kway_advance_t kway_adv,
-		      void **args, int nr)
+		      void **args, int nr, bool logs_input)
 {
 	DECLARE_SRCH_INFO(sb, srinf);
 	struct scoutfs_srch_block *srb = NULL;
@@ -1567,6 +1660,15 @@ static int kway_merge(struct super_block *sb,
 				blk++;
 			}
 
+			/* end sorted block on _SAFE offset for testing */
+			if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
+			    scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
+				pad_entries_at_safe(sfl, blk, srb);
+				scoutfs_block_put(sb, bl);
+				bl = NULL;
+				blk++;
+			}
+
 			scoutfs_inc_counter(sb, srch_compact_entry);
 
 		} else {
@@ -1609,6 +1711,8 @@ static int kway_merge(struct super_block *sb,
 			empty++;
 			ret = 0;
 		} else if (ret < 0) {
+			if (ret == -ENOANO) /* just testing trigger */
+				ret = 0;
 			goto out;
 		}
 
@@ -1816,7 +1920,7 @@ static int compact_logs(struct super_block *sb,
 	}
 
 	ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_page, kway_adv_page,
-			 args, nr_pages);
+			 args, nr_pages, true);
 	if (ret < 0)
 		goto out;
 
@@ -1874,12 +1978,18 @@ static int kway_get_reader(struct super_block *sb,
 		srb = rdr->bl->data;
 
 		if (rdr->pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
-		    rdr->skip >= SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
+		    rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
 		    rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
 			/* XXX inconsistency */
 			return -EIO;
 		}
 
+		if (rdr->decoded_bytes == 0 && rdr->pos == SCOUTFS_SRCH_BLOCK_SAFE_BYTES &&
+		    scoutfs_trigger(sb, SRCH_MERGE_STOP_SAFE)) {
+			/* only used in testing */
+			return -ENOANO;
+		}
+
 		/* decode entry, possibly skipping start of the block */
 		while (rdr->decoded_bytes == 0 || rdr->pos < rdr->skip) {
 			ret = decode_entry(srb->entries + rdr->pos,
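Taken with the test added further down, the one-character change from >= to > above is the heart of the fix: a reader resuming at exactly SCOUTFS_SRCH_BLOCK_SAFE_BYTES was treated as inconsistent and kept returning -EIO. A minimal hypothetical boundary-condition sketch follows; the constant and helpers are stand-ins, not scoutfs code.

#include <stdbool.h>
#include <stdio.h>

#define SAFE_BYTES 60000U	/* stand-in for SCOUTFS_SRCH_BLOCK_SAFE_BYTES */

static bool old_check_rejects(unsigned int skip)
{
	return skip >= SAFE_BYTES;	/* pre-fix: the exact safe offset looked like corruption */
}

static bool new_check_rejects(unsigned int skip)
{
	return skip > SAFE_BYTES;	/* post-fix: only positions past the safe offset are invalid */
}

int main(void)
{
	unsigned int skip = SAFE_BYTES;	/* a partial merge stopped right at the safe offset */

	printf("resume at %u: old check %s, new check %s\n", skip,
	       old_check_rejects(skip) ? "returns -EIO (stuck)" : "continues",
	       new_check_rejects(skip) ? "returns -EIO (stuck)" : "continues");
	return 0;
}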
@@ -1969,7 +2079,7 @@ static int compact_sorted(struct super_block *sb,
 	}
 
 	ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_reader,
-			 kway_adv_reader, args, nr);
+			 kway_adv_reader, args, nr, false);
 
 	sc->flags |= SCOUTFS_SRCH_COMPACT_FLAG_DONE;
 	for (i = 0; i < nr; i++) {
@@ -2179,7 +2289,7 @@ out:
 
 	scoutfs_block_writer_forget_all(sb, &wri);
 	if (!atomic_read(&srinf->shutdown)) {
-		delay = ret == 0 ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
+		delay = (sc->nr > 0 && ret == 0) ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
 		queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
 	}
 
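Read together with the server hunk at the top (an empty compaction request now comes back as ret == 0 with sc->nr == 0 instead of -ENOENT), the delay change above appears to make the worker back off when it was handed nothing to do rather than requeueing immediately. A hypothetical userspace sketch of that requeue policy; SRCH_COMPACT_DELAY_MS and msecs_to_jiffies() are stubbed here, not the kernel's.

#include <stdio.h>

#define SRCH_COMPACT_DELAY_MS	10000		/* stand-in value */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ms;				/* stub: pretend 1 jiffy per millisecond */
}

static unsigned long pick_requeue_delay(int ret, unsigned int nr_inputs)
{
	/* only spin up the next pass immediately when real work succeeded */
	if (ret == 0 && nr_inputs > 0)
		return 0;
	return msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);	/* idle or error: back off */
}

int main(void)
{
	printf("work done: %lu, nothing to do: %lu, error: %lu\n",
	       pick_requeue_delay(0, 2), pick_requeue_delay(0, 0),
	       pick_requeue_delay(-5, 0));
	return 0;
}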
@@ -39,6 +39,9 @@ struct scoutfs_triggers {
 
 static char *names[] = {
 	[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
+	[SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
+	[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
+	[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
 	[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
 };
 
@@ -3,6 +3,9 @@
 
 enum scoutfs_trigger {
 	SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
+	SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
+	SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
+	SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
 	SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
 	SCOUTFS_TRIGGER_NR,
 };
@@ -265,6 +265,15 @@ t_trigger_get() {
 	cat "$(t_trigger_path "$nr")/$which"
 }
 
+t_trigger_set() {
+	local which="$1"
+	local nr="$2"
+	local val="$3"
+	local path=$(t_trigger_path "$nr")
+
+	echo "$val" > "$path/$which"
+}
+
 t_trigger_show() {
 	local which="$1"
 	local string="$2"
@@ -276,9 +285,8 @@ t_trigger_show() {
 t_trigger_arm_silent() {
 	local which="$1"
 	local nr="$2"
-	local path=$(t_trigger_path "$nr")
 
-	echo 1 > "$path/$which"
+	t_trigger_set "$which" "$nr" 1
 }
 
 t_trigger_arm() {
tests/golden/srch-safe-merge-pos (new file, 81 lines)
@@ -0,0 +1,81 @@
+== snapshot errors
+== arm compaction triggers
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+== force lots of small rotated log files for compaction
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+== wait for compaction
+== test and disarm compaction triggers
+== verify triggers and errors
+== cleanup
@@ -14,6 +14,7 @@ offline-extent-waiting.sh
 move-blocks.sh
 large-fragmented-free.sh
 enospc.sh
+srch-safe-merge-pos.sh
 srch-basic-functionality.sh
 simple-xattr-unit.sh
 totl-xattr-tag.sh
tests/tests/srch-safe-merge-pos.sh (new file, 69 lines)
@@ -0,0 +1,69 @@
+#
+# There was a bug where srch file compaction could get stuck if a
+# partial compaction finished at the specific _SAFE_BYTES offset in a
+# block.  Resuming from that position would return an error and
+# compaction would stop making forward progress.
+#
+# We use triggers to make sure that we create the circumstance where a
+# sorted srch block ends at the _SAFE_BYTES offsset and that a merge
+# request stops with a partial block at that specific offset.  We then
+# watch error counters to make sure compaction doesn't get stuck.
+#
+
+# forcing rotation, so just a few
+NR=10
+SEQF="%.20g"
+COMPACT_NR=4
+
+echo "== snapshot errors"
+declare -a err
+for nr in $(t_fs_nrs); do
+	err[$nr]=$(t_counter srch_compact_error $nr)
+done
+
+echo "== arm compaction triggers"
+for nr in $(t_fs_nrs); do
+	t_trigger_arm srch_compact_logs_pad_safe $nr
+	t_trigger_arm srch_merge_stop_safe $nr
+done
+
+echo "== force lots of small rotated log files for compaction"
+sv=$(t_server_nr)
+iter=1
+while [ $iter -le $((COMPACT_NR * COMPACT_NR * COMPACT_NR)) ]; do
+	t_trigger_arm srch_force_log_rotate $sv
+
+	seq -f "f-$iter-$SEQF" 1 10 | src/bulk_create_paths -S -d "$T_D0" > /dev/null
+	sync
+
+	test "$(t_trigger_get srch_force_log_rotate $sv)" == "0" || \
+		t_fail "srch_force_log_rotate didn't trigger"
+
+	((iter++))
+done
+
+echo "== wait for compaction"
+sleep 15
+
+echo "== test and disarm compaction triggers"
+pad=0
+merge_stop=0
+for nr in $(t_fs_nrs); do
+	test "$(t_trigger_get srch_compact_logs_pad_safe $nr)" == "0" && pad=1
+	t_trigger_set srch_compact_logs_pad_safe $nr 0
+	test "$(t_trigger_get srch_merge_stop_safe $nr)" == "0" && merge_stop=1
+	t_trigger_set srch_merge_stop_safe $nr 0
+done
+
+echo "== verify triggers and errors"
+test $pad == 1 || t_fail "srch_compact_logs_pad_safe didn't trigger"
+test $merge_stop == 1 || t_fail "srch_merge_stop_safe didn't trigger"
+for nr in $(t_fs_nrs); do
+	test "$(t_counter srch_compact_error $nr)" == "${err[$nr]}" || \
+		t_fail "srch_compact_error counter increased on mount $nr"
+done
+
+echo "== cleanup"
+find "$T_D0" -type f -name 'f-*' -delete
+
+t_pass