Compare commits: mirror of https://github.com/versity/scoutfs.git

11 Commits: 4b2afa61b8, 222ba2cede, c7e97eeb1f, 21c070b42d, 77fbf92968,
d5c699c3b4, b56b8e502c, 5ff372561d, bdecee5e5d, 707e1b2d59, 006f429f72

@@ -1,6 +1,18 @@
 Versity ScoutFS Release Notes
 =============================

+---
+v1.18
+\
+*Nov 7, 2023*
+
+Fixed a bug where background srch file compaction could stop making
+forward progress if a partial compaction operation was committed at a
+specific byte offset in a block. This would cause srch file searches to
+be progressively more expensive over time. Once this fix is running
+background compaction will resume, bringing the cost of searches back
+down.
+
 ---
 v1.17
 \

kmod/src/server.c

@@ -91,6 +91,7 @@ do { \
 struct server_info {
         struct super_block *sb;
         spinlock_t lock;
+        seqlock_t seqlock;
         wait_queue_head_t waitq;

         struct workqueue_struct *wq;

@@ -132,11 +133,9 @@ struct server_info {
         struct mutex mounted_clients_mutex;

         /* stable super stored from commits, given in locks and rpcs */
-        seqcount_t stable_seqcount;
         struct scoutfs_super_block stable_super;

         /* serializing and get and set volume options */
-        seqcount_t volopt_seqcount;
         struct mutex volopt_mutex;
         struct scoutfs_volume_options volopt;


@@ -182,7 +181,7 @@ static bool get_volopt_val(struct server_info *server, int nr, u64 *val)
         unsigned seq;

         do {
-                seq = read_seqcount_begin(&server->volopt_seqcount);
+                seq = read_seqbegin(&server->seqlock);
                 if ((le64_to_cpu(server->volopt.set_bits) & bit)) {
                         is_set = true;
                         *val = le64_to_cpup(opt);

@@ -190,7 +189,7 @@ static bool get_volopt_val(struct server_info *server, int nr, u64 *val)
                         is_set = false;
                         *val = 0;
                 };
-        } while (read_seqcount_retry(&server->volopt_seqcount, seq));
+        } while (read_seqretry(&server->seqlock, seq));

         return is_set;
 }

@@ -506,7 +505,7 @@ static void get_stable(struct super_block *sb, struct scoutfs_super_block *super
         unsigned int seq;

         do {
-                seq = read_seqcount_begin(&server->stable_seqcount);
+                seq = read_seqbegin(&server->seqlock);
                 if (super)
                         *super = server->stable_super;
                 if (roots) {

@@ -514,7 +513,7 @@ static void get_stable(struct super_block *sb, struct scoutfs_super_block *super
                         roots->logs_root = server->stable_super.logs_root;
                         roots->srch_root = server->stable_super.srch_root;
                 }
-        } while (read_seqcount_retry(&server->stable_seqcount, seq));
+        } while (read_seqretry(&server->seqlock, seq));
 }

 u64 scoutfs_server_seq(struct super_block *sb)

@@ -548,11 +547,9 @@ void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq)

 static void set_stable_super(struct server_info *server, struct scoutfs_super_block *super)
 {
-        preempt_disable();
-        write_seqcount_begin(&server->stable_seqcount);
+        write_seqlock(&server->seqlock);
         server->stable_super = *super;
-        write_seqcount_end(&server->stable_seqcount);
-        preempt_enable();
+        write_sequnlock(&server->seqlock);
 }

 /*

@@ -1969,9 +1966,7 @@ static int server_srch_get_compact(struct super_block *sb,
         ret = scoutfs_srch_get_compact(sb, &server->alloc, &server->wri,
                                        &super->srch_root, rid, sc);
         mutex_unlock(&server->srch_mutex);
-        if (ret == 0 && sc->nr == 0)
-                ret = -ENOENT;
-        if (ret < 0)
+        if (ret < 0 || (ret == 0 && sc->nr == 0))
                 goto apply;

         mutex_lock(&server->alloc_mutex);

@@ -3073,9 +3068,9 @@ static int server_get_volopt(struct super_block *sb, struct scoutfs_net_connecti
         }

         do {
-                seq = read_seqcount_begin(&server->volopt_seqcount);
+                seq = read_seqbegin(&server->seqlock);
                 volopt = server->volopt;
-        } while (read_seqcount_retry(&server->volopt_seqcount, seq));
+        } while (read_seqretry(&server->seqlock, seq));

 out:
         return scoutfs_net_response(sb, conn, cmd, id, ret, &volopt, sizeof(volopt));

@@ -3144,12 +3139,12 @@ static int server_set_volopt(struct super_block *sb, struct scoutfs_net_connecti
 apply:
         ret = server_apply_commit(sb, &hold, ret);

-        write_seqcount_begin(&server->volopt_seqcount);
+        write_seqlock(&server->seqlock);
         if (ret == 0)
                 server->volopt = super->volopt;
         else
                 super->volopt = server->volopt;
-        write_seqcount_end(&server->volopt_seqcount);
+        write_sequnlock(&server->seqlock);

         mutex_unlock(&server->volopt_mutex);
 out:

@@ -3192,12 +3187,12 @@ static int server_clear_volopt(struct super_block *sb, struct scoutfs_net_connec

         ret = server_apply_commit(sb, &hold, ret);

-        write_seqcount_begin(&server->volopt_seqcount);
+        write_seqlock(&server->seqlock);
         if (ret == 0)
                 server->volopt = super->volopt;
         else
                 super->volopt = server->volopt;
-        write_seqcount_end(&server->volopt_seqcount);
+        write_sequnlock(&server->seqlock);

         mutex_unlock(&server->volopt_mutex);
 out:

@@ -4336,9 +4331,9 @@ static void scoutfs_server_worker(struct work_struct *work)
         }

         /* update volume options early, possibly for use during startup */
-        write_seqcount_begin(&server->volopt_seqcount);
+        write_seqlock(&server->seqlock);
         server->volopt = super->volopt;
-        write_seqcount_end(&server->volopt_seqcount);
+        write_sequnlock(&server->seqlock);

         atomic64_set(&server->seq_atomic, le64_to_cpu(super->seq));
         set_stable_super(server, super);

@@ -4478,6 +4473,7 @@ int scoutfs_server_setup(struct super_block *sb)

         server->sb = sb;
         spin_lock_init(&server->lock);
+        seqlock_init(&server->seqlock);
         init_waitqueue_head(&server->waitq);
         INIT_WORK(&server->work, scoutfs_server_worker);
         server->status = SERVER_DOWN;

@@ -4492,8 +4488,6 @@ int scoutfs_server_setup(struct super_block *sb)
         INIT_WORK(&server->log_merge_free_work, server_log_merge_free_work);
         mutex_init(&server->srch_mutex);
         mutex_init(&server->mounted_clients_mutex);
-        seqcount_init(&server->stable_seqcount);
-        seqcount_init(&server->volopt_seqcount);
         mutex_init(&server->volopt_mutex);
         INIT_WORK(&server->fence_pending_recov_work, fence_pending_recov_worker);
         INIT_DELAYED_WORK(&server->reclaim_dwork, reclaim_worker);

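The hunks above all make the same substitution: the two per-field seqcount_t counters are dropped in favor of a single seqlock_t in struct server_info, readers move from read_seqcount_begin()/read_seqcount_retry() to read_seqbegin()/read_seqretry(), and writers move from preempt_disable()/write_seqcount_begin() pairs to write_seqlock(), whose internal spinlock also serializes writers against each other. A minimal sketch of that read/write idiom, using hypothetical names rather than the scoutfs structures and assuming only the stock Linux seqlock API:

/* Sketch only: hypothetical fields and functions, not scoutfs code. */
#include <linux/seqlock.h>

struct example_state {
        seqlock_t seqlock;      /* one lock covers both values below */
        u64 stable_value;
        u64 volopt_value;
};

/* Writer side: write_seqlock() takes the embedded spinlock, so the
 * explicit preempt_disable()/preempt_enable() around a raw seqcount
 * write section is no longer needed. */
static void example_set(struct example_state *st, u64 val)
{
        write_seqlock(&st->seqlock);
        st->stable_value = val;
        write_sequnlock(&st->seqlock);
}

/* Reader side: retry the snapshot if a writer overlapped it. */
static u64 example_get(struct example_state *st)
{
        unsigned int seq;
        u64 val;

        do {
                seq = read_seqbegin(&st->seqlock);
                val = st->stable_value;
        } while (read_seqretry(&st->seqlock, seq));

        return val;
}

Initialization mirrors the setup hunk: a single seqlock_init() call replaces the two seqcount_init() calls.
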
120 kmod/src/srch.c

@@ -30,6 +30,7 @@
 #include "client.h"
 #include "counters.h"
 #include "scoutfs_trace.h"
+#include "triggers.h"

 /*
  * This srch subsystem gives us a way to find inodes that have a given

@@ -520,6 +521,95 @@ out:
         return ret;
 }

+/*
+ * Padded entries are encoded in pairs after an existing entry. All of
+ * the pairs cancel each other out by all readers (the second encoding
+ * looks like deletion) so they aren't visible to the first/last bounds of
+ * the block or file.
+ */
+static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
+                               struct scoutfs_srch_block *srb, struct scoutfs_srch_entry *sre)
+{
+        int ret;
+
+        ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
+                           sre, &srb->tail);
+        if (ret > 0) {
+                srb->tail = *sre;
+                le32_add_cpu(&srb->entry_nr, 1);
+                le32_add_cpu(&srb->entry_bytes, ret);
+                le64_add_cpu(&sfl->entries, 1);
+                ret = 0;
+        }
+
+        return ret;
+}
+
+/*
+ * This is called by a testing trigger to create a very specific case of
+ * encoded entry offsets. We want the last entry in the block to start
+ * precisely at the _SAFE_BYTES offset.
+ *
+ * This is called when there is a single existing entry in the block.
+ * We have the entire block to work with. We encode pairs of matching
+ * entries. This hides them from readers (both searches and merging) as
+ * they're interpreted as creation and deletion and are deleted. We use
+ * the existing hash value of the first entry in the block but then set
+ * the inode to an impossibly large number so it doesn't interfere with
+ * anything.
+ *
+ * To hit the specific offset we very carefully manage the amount of
+ * bytes of change between fields in the entry. We know that if we
+ * change all the byte of the ino and id we end up with a 20 byte
+ * (2+8+8,2) encoding of the pair of entries. To have the last entry
+ * start at the _SAFE_POS offset we know that the final 20 byte pair
+ * encoding needs to end at 2 bytes (second entry encoding) after the
+ * _SAFE_POS offset.
+ *
+ * So as we encode pairs we watch the delta of our current offset from
+ * that desired final offset of 2 past _SAFE_POS. If we're a multiple
+ * of 20 away then we encode the full 20 byte pairs. If we're not, then
+ * we drop a byte to encode 19 bytes. That'll slowly change the offset
+ * to be a multiple of 20 again while encoding large entries.
+ */
+static void pad_entries_at_safe(struct scoutfs_srch_file *sfl, u64 blk,
+                                struct scoutfs_srch_block *srb)
+{
+        struct scoutfs_srch_entry sre;
+        u32 target;
+        s32 diff;
+        u64 hash;
+        u64 ino;
+        u64 id;
+        int ret;
+
+        hash = le64_to_cpu(srb->tail.hash);
+        ino = le64_to_cpu(srb->tail.ino) | (1ULL << 62);
+        id = le64_to_cpu(srb->tail.id);
+
+        target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;
+
+        while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
+                ino ^= 1ULL << (7 * 8);
+                if (diff % 20 == 0) {
+                        id ^= 1ULL << (7 * 8);
+                } else {
+                        id ^= 1ULL << (6 * 8);
+                }
+
+                sre.hash = cpu_to_le64(hash);
+                sre.ino = cpu_to_le64(ino);
+                sre.id = cpu_to_le64(id);
+
+                ret = append_padded_entry(sfl, blk, srb, &sre);
+                if (ret == 0)
+                        ret = append_padded_entry(sfl, blk, srb, &sre);
+                BUG_ON(ret != 0);
+
+                diff = target - le32_to_cpu(srb->entry_bytes);
+        }
+}
+
 /*
  * The caller is dropping an ino/id because the tracking rbtree is full.
  * This loses information so we can't return any entries at or after the

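The offset arithmetic described in the pad_entries_at_safe() comment can be checked on its own: each appended pair costs either 20 or 19 encoded bytes, chosen by whether the remaining distance to the target (_SAFE_BYTES plus the 2-byte second encoding) is a multiple of 20. A small stand-alone simulation, with a hypothetical block size and starting fill (not scoutfs code), shows the running byte count landing exactly on the target as long as the starting distance is large, which it is for a nearly empty block:

/* Stand-alone sketch, hypothetical sizes: not scoutfs code. */
#include <stdint.h>
#include <stdio.h>

#define SAFE_BYTES 60000u       /* stand-in for SCOUTFS_SRCH_BLOCK_SAFE_BYTES */

int main(void)
{
        uint32_t entry_bytes = 37;              /* bytes already encoded in the block */
        uint32_t target = SAFE_BYTES + 2;       /* final pair must end 2 bytes past the safe offset */
        int32_t diff;

        while ((diff = (int32_t)(target - entry_bytes)) > 0) {
                /* a multiple of 20 away: append a full 20-byte pair;
                 * otherwise drop one delta byte and append 19 bytes,
                 * nudging the remainder back toward a multiple of 20 */
                entry_bytes += (diff % 20 == 0) ? 20 : 19;
        }

        /* the 2-byte second encoding of the final pair ends at target,
         * so the last entry starts exactly at the safe offset */
        printf("entry_bytes %u, target %u, last entry starts at %u\n",
               entry_bytes, target, entry_bytes - 2);
        return 0;
}
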
@@ -987,6 +1077,9 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
         struct scoutfs_key key;
         int ret;

+        if (sfl->ref.blkno && !force && scoutfs_trigger(sb, SRCH_FORCE_LOG_ROTATE))
+                force = true;
+
         if (sfl->ref.blkno == 0 ||
             (!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
                 return 0;

@@ -1462,7 +1555,7 @@ static int kway_merge(struct super_block *sb,
                       struct scoutfs_block_writer *wri,
                       struct scoutfs_srch_file *sfl,
                       kway_get_t kway_get, kway_advance_t kway_adv,
-                      void **args, int nr)
+                      void **args, int nr, bool logs_input)
 {
         DECLARE_SRCH_INFO(sb, srinf);
         struct scoutfs_srch_block *srb = NULL;

@@ -1567,6 +1660,15 @@ static int kway_merge(struct super_block *sb,
                         blk++;
                 }

+                /* end sorted block on _SAFE offset for testing */
+                if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
+                    scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
+                        pad_entries_at_safe(sfl, blk, srb);
+                        scoutfs_block_put(sb, bl);
+                        bl = NULL;
+                        blk++;
+                }
+
                 scoutfs_inc_counter(sb, srch_compact_entry);

         } else {

@@ -1609,6 +1711,8 @@ static int kway_merge(struct super_block *sb,
                         empty++;
                         ret = 0;
                 } else if (ret < 0) {
+                        if (ret == -ENOANO) /* just testing trigger */
+                                ret = 0;
                         goto out;
                 }

@@ -1816,7 +1920,7 @@ static int compact_logs(struct super_block *sb,
         }

         ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_page, kway_adv_page,
-                         args, nr_pages);
+                         args, nr_pages, true);
         if (ret < 0)
                 goto out;


@@ -1874,12 +1978,18 @@ static int kway_get_reader(struct super_block *sb,
         srb = rdr->bl->data;

         if (rdr->pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
-            rdr->skip >= SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
+            rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
             rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
                 /* XXX inconsistency */
                 return -EIO;
         }

+        if (rdr->decoded_bytes == 0 && rdr->pos == SCOUTFS_SRCH_BLOCK_SAFE_BYTES &&
+            scoutfs_trigger(sb, SRCH_MERGE_STOP_SAFE)) {
+                /* only used in testing */
+                return -ENOANO;
+        }
+
         /* decode entry, possibly skipping start of the block */
         while (rdr->decoded_bytes == 0 || rdr->pos < rdr->skip) {
                 ret = decode_entry(srb->entries + rdr->pos,

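The one-character relaxation in the kway_get_reader() hunk above (>= to >) is the actual fix the release notes describe: a merge may stop, and later resume, with its skip offset exactly at _SAFE_BYTES, and the old check misread that legal resume position as an inconsistency and returned -EIO, so compaction stopped making forward progress. A tiny stand-alone sketch of the corrected bounds test, with hypothetical names (not the scoutfs function), makes the boundary explicit:

/* Sketch only: hypothetical helper, not the scoutfs reader code. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SAFE_BYTES 60000u       /* stand-in for SCOUTFS_SRCH_BLOCK_SAFE_BYTES */

/* A resume offset equal to SAFE_BYTES is legal; only offsets past it,
 * or at or past the encoded bytes in the block, indicate inconsistency. */
static bool resume_skip_valid(uint32_t skip, uint32_t entry_bytes)
{
        return skip <= SAFE_BYTES && skip < entry_bytes;
}

int main(void)
{
        assert(resume_skip_valid(SAFE_BYTES, SAFE_BYTES + 40));         /* the fixed boundary case */
        assert(!resume_skip_valid(SAFE_BYTES + 1, SAFE_BYTES + 40));    /* past the safe offset */
        assert(!resume_skip_valid(128, 128));                           /* at or past entry_bytes */
        return 0;
}
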
@@ -1969,7 +2079,7 @@ static int compact_sorted(struct super_block *sb,
         }

         ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_reader,
-                         kway_adv_reader, args, nr);
+                         kway_adv_reader, args, nr, false);

         sc->flags |= SCOUTFS_SRCH_COMPACT_FLAG_DONE;
         for (i = 0; i < nr; i++) {

@@ -2179,7 +2289,7 @@ out:

         scoutfs_block_writer_forget_all(sb, &wri);
         if (!atomic_read(&srinf->shutdown)) {
-                delay = ret == 0 ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
+                delay = (sc->nr > 0 && ret == 0) ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
                 queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
         }

kmod/src/triggers.c

@@ -39,6 +39,9 @@ struct scoutfs_triggers {

 static char *names[] = {
         [SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
+        [SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
+        [SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
+        [SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
         [SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
 };

kmod/src/triggers.h

@@ -3,6 +3,9 @@

 enum scoutfs_trigger {
         SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
+        SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
+        SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
+        SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
         SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
         SCOUTFS_TRIGGER_NR,
 };

@@ -265,6 +265,15 @@ t_trigger_get() {
         cat "$(t_trigger_path "$nr")/$which"
 }

+t_trigger_set() {
+        local which="$1"
+        local nr="$2"
+        local val="$3"
+        local path=$(t_trigger_path "$nr")
+
+        echo "$val" > "$path/$which"
+}
+
 t_trigger_show() {
         local which="$1"
         local string="$2"

@@ -276,9 +285,8 @@ t_trigger_show() {
 t_trigger_arm_silent() {
         local which="$1"
         local nr="$2"
-        local path=$(t_trigger_path "$nr")

-        echo 1 > "$path/$which"
+        t_trigger_set "$which" "$nr" 1
 }

 t_trigger_arm() {

81 tests/golden/srch-safe-merge-pos (new file)

@@ -0,0 +1,81 @@
+== snapshot errors
+== arm compaction triggers
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+trigger srch_compact_logs_pad_safe armed: 1
+trigger srch_merge_stop_safe armed: 1
+== force lots of small rotated log files for compaction
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+trigger srch_force_log_rotate armed: 1
+== wait for compaction
+== test and disarm compaction triggers
+== verify triggers and errors
+== cleanup

@@ -14,6 +14,7 @@ offline-extent-waiting.sh
 move-blocks.sh
 large-fragmented-free.sh
 enospc.sh
+srch-safe-merge-pos.sh
 srch-basic-functionality.sh
 simple-xattr-unit.sh
 totl-xattr-tag.sh

@@ -11,8 +11,13 @@ FILE="$T_D0/file"
 # final block as we truncated past it.
 #
 echo "== truncate writes zeroed partial end of file block"
-yes | dd of="$FILE" bs=8K count=1 status=none
+yes | dd of="$FILE" bs=8K count=1 status=none iflag=fullblock
 sync
+
+# not passing iflag=fullblock causes the file occasionally to just be
+# 4K, so just to be safe we should at least check size once
+test `stat --printf="%s\n" "$FILE"` -eq 8192 || t_fail "test file incorrect start size"
+
 truncate -s 6K "$FILE"
 truncate -s 12K "$FILE"
 echo 3 > /proc/sys/vm/drop_caches

69 tests/tests/srch-safe-merge-pos.sh (new file)

@@ -0,0 +1,69 @@
+#
+# There was a bug where srch file compaction could get stuck if a
+# partial compaction finished at the specific _SAFE_BYTES offset in a
+# block. Resuming from that position would return an error and
+# compaction would stop making forward progress.
+#
+# We use triggers to make sure that we create the circumstance where a
+# sorted srch block ends at the _SAFE_BYTES offsset and that a merge
+# request stops with a partial block at that specific offset. We then
+# watch error counters to make sure compaction doesn't get stuck.
+#
+
+# forcing rotation, so just a few
+NR=10
+SEQF="%.20g"
+COMPACT_NR=4
+
+echo "== snapshot errors"
+declare -a err
+for nr in $(t_fs_nrs); do
+        err[$nr]=$(t_counter srch_compact_error $nr)
+done
+
+echo "== arm compaction triggers"
+for nr in $(t_fs_nrs); do
+        t_trigger_arm srch_compact_logs_pad_safe $nr
+        t_trigger_arm srch_merge_stop_safe $nr
+done
+
+echo "== force lots of small rotated log files for compaction"
+sv=$(t_server_nr)
+iter=1
+while [ $iter -le $((COMPACT_NR * COMPACT_NR * COMPACT_NR)) ]; do
+        t_trigger_arm srch_force_log_rotate $sv
+
+        seq -f "f-$iter-$SEQF" 1 10 | src/bulk_create_paths -S -d "$T_D0" > /dev/null
+        sync
+
+        test "$(t_trigger_get srch_force_log_rotate $sv)" == "0" || \
+                t_fail "srch_force_log_rotate didn't trigger"
+
+        ((iter++))
+done
+
+echo "== wait for compaction"
+sleep 15
+
+echo "== test and disarm compaction triggers"
+pad=0
+merge_stop=0
+for nr in $(t_fs_nrs); do
+        test "$(t_trigger_get srch_compact_logs_pad_safe $nr)" == "0" && pad=1
+        t_trigger_set srch_compact_logs_pad_safe $nr 0
+        test "$(t_trigger_get srch_merge_stop_safe $nr)" == "0" && merge_stop=1
+        t_trigger_set srch_merge_stop_safe $nr 0
+done
+
+echo "== verify triggers and errors"
+test $pad == 1 || t_fail "srch_compact_logs_pad_safe didn't trigger"
+test $merge_stop == 1 || t_fail "srch_merge_stop_safe didn't trigger"
+for nr in $(t_fs_nrs); do
+        test "$(t_counter srch_compact_error $nr)" == "${err[$nr]}" || \
+                t_fail "srch_compact_error counter increased on mount $nr"
+done
+
+echo "== cleanup"
+find "$T_D0" -type f -name 'f-*' -delete
+
+t_pass