Compare commits

..

5 Commits

Author SHA1 Message Date
Chris Kirby
96999044af Suppress another forced shutdown error message
The "server error emptying freed" error was causing a
fence-and-reclaim test failure. In this case, the error
was -ENOLINK, which we should ignore for messaging purposes.

Signed-off-by: Chris Kirby <ckirby@versity.com>
2026-04-20 11:48:31 -05:00
Chris Kirby
4b28a2fa8f Don't emit empty blocks in kway_merge()
It's possible for a srch compaction to collapse down to nothing
if given evenly paired create/delete entries. In this case, we
were emitting an empty block. This could cause problems for
search_sorted_file(), which assumes that every block it sees
has a valid first and last entry.

Fix this by keeping a temp entry and only emitting it if it differs
from the next entry in the block. Be sure to flush out a straggling
temp entry if we have one when we're done with the last block of
the merge.

Signed-off-by: Chris Kirby <ckirby@versity.com>
2026-04-20 11:48:31 -05:00
Chris Kirby
69e4a7c54b Change the looping logic in run-tests.sh
If a set of tests is provided, loop over the entire set the
requested number of times. Start and stop any requested tracing
across the set boundary.

Signed-off-by: Chris Kirby <ckirby@versity.com>
2026-04-20 11:33:45 -05:00
Chris Kirby
437594508a Improve tracing for get_file_block()
Print the first and last entries, the entry_nr and entry_bytes.

Signed-off-by: Chris Kirby <ckirby@versity.com>
2026-04-20 11:33:45 -05:00
Chris Kirby
4a92e49a61 Fix trigger firing race in srch-safe-merge-pos
Because the srch triggers are inherently async to the test,
we can't be sure they won't fire prematurely just because a
compact worker started running at an inconvenient time.

Make the trigger arming silent to avoid spurious test failures.
Move the trigger arming closer to the point of interest to
increase the chances that we're actually testing what we want.

Signed-off-by: Chris Kirby <ckirby@versity.com>
2026-04-20 11:33:45 -05:00
25 changed files with 309 additions and 671 deletions

View File

@@ -1,38 +1,6 @@
Versity ScoutFS Release Notes
=============================
---
v1.31
\
*May 5, 2026*
Fix race between modifying quota rules and internal reading of the rules
that tripped an assertion.
Fix a bug that could skip merging totl items under specific heavy write
loads. This could lead to merged totl items incorrectly tracking the
sum of all the contributing totl xattrs.
Fix many small low risk bugs in error paths that were found with code
analysis and testing.
---
v1.30
\
*Apr 21, 2026*
Fix a problem reading the accumulated totals of contributing .totl.
xattrs when log merging is in progress. The problem would have readers
of the totals calculate the sums incorrectly.
Fix a problem updating quota rules. There was a race where updates
could be corrupted if they happened while a transaction was being
written.
Fix a problem deleting files with .indx. xattrs. The internal indexing
metadata wouldn't be properly deleted so the files would still claim to
be present and visible in the index, though the file no longer existed.
---
v1.29
\

View File

@@ -218,7 +218,6 @@ static void block_free_work(struct work_struct *work)
llist_for_each_entry_safe(bp, tmp, deleted, free_node) {
block_free(sb, bp);
cond_resched();
}
}
@@ -468,6 +467,9 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
sector_t sector;
int ret = 0;
if (scoutfs_forcing_unmount(sb))
return -ENOLINK;
sector = bp->bl.blkno << (SCOUTFS_BLOCK_LG_SHIFT - 9);
WARN_ON_ONCE(bp->bl.blkno == U64_MAX);
@@ -478,17 +480,6 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
set_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
block_get(bp);
/*
* A second thread may already be waiting on this block's completion
* after this thread won the race to submit the block. We exit through
* the block_end_io error path which sets BLOCK_BIT_ERROR and assures
* that other callers in the waitq get woken up.
*/
if (scoutfs_forcing_unmount(sb)) {
ret = -ENOLINK;
goto end_io;
}
blk_start_plug(&plug);
for (off = 0; off < SCOUTFS_BLOCK_LG_SIZE; off += PAGE_SIZE) {
@@ -526,7 +517,6 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
blk_finish_plug(&plug);
end_io:
/* let racing end_io know we're done */
block_end_io(sb, opf, bp, ret);

View File

@@ -549,7 +549,6 @@ retry:
goto out;
if (scoutfs_data_wait_found(&dw)) {
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
lock = NULL;
/* XXX callee locks instead? */
inode_unlock(inode);

View File

@@ -1739,43 +1739,6 @@ out:
return ret;
}
static long scoutfs_ioc_inject_totl_delta(struct file *file, unsigned long arg)
{
struct super_block *sb = file_inode(file)->i_sb;
struct scoutfs_ioctl_inject_totl_delta __user *uitd = (void __user *)arg;
struct scoutfs_ioctl_inject_totl_delta itd;
struct scoutfs_xattr_totl_val tval;
struct scoutfs_lock *lock = NULL;
struct scoutfs_key key;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&itd, uitd, sizeof(itd)))
return -EFAULT;
scoutfs_xattr_init_totl_key(&key, itd.name);
tval.total = cpu_to_le64((u64)itd.total);
tval.count = cpu_to_le64((u64)itd.count);
ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &lock);
if (ret < 0)
goto out;
ret = scoutfs_hold_trans(sb, true);
if (ret < 0)
goto unlock;
ret = scoutfs_item_delta(sb, &key, &tval, sizeof(tval), lock);
scoutfs_release_trans(sb);
unlock:
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE_ONLY);
out:
return ret;
}
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -1827,8 +1790,6 @@ long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return scoutfs_ioc_read_xattr_index(file, arg);
case SCOUTFS_IOC_PUNCH_OFFLINE:
return scoutfs_ioc_punch_offline(file, arg);
case SCOUTFS_IOC_INJECT_TOTL_DELTA:
return scoutfs_ioc_inject_totl_delta(file, arg);
}
return -ENOTTY;

View File

@@ -876,17 +876,4 @@ struct scoutfs_ioctl_punch_offline {
#define SCOUTFS_IOC_PUNCH_OFFLINE \
_IOW(SCOUTFS_IOCTL_MAGIC, 24, struct scoutfs_ioctl_punch_offline)
/*
* Inject a signed (total, count) delta at the totl key @name (a, b, c
* match the trailing dotted u64s of a totl xattr name).
*/
struct scoutfs_ioctl_inject_totl_delta {
__u64 name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
__s64 total;
__s64 count;
};
#define SCOUTFS_IOC_INJECT_TOTL_DELTA \
_IOW(SCOUTFS_IOCTL_MAGIC, 25, struct scoutfs_ioctl_inject_totl_delta)
#endif

View File

@@ -813,7 +813,6 @@ int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
out:
if (!lock) {
kfree(ireq);
ret = scoutfs_client_lock_response(sb, net_id, nl);
BUG_ON(ret); /* lock server doesn't fence timed out client requests */
}
@@ -980,7 +979,7 @@ static bool lock_flags_invalid(int flags)
*/
static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
struct scoutfs_key *start, struct scoutfs_key *end,
u64 ino, struct scoutfs_lock **ret_lock)
struct scoutfs_lock **ret_lock)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
@@ -1028,8 +1027,6 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
/* the fast path where we can use the granted mode */
if (lock_modes_match(lock->mode, mode)) {
lock_inc_count(lock->users, mode);
lock->last_user_pid[mode] = task_pid_nr(current);
lock->last_user_ino[mode] = ino;
*ret_lock = lock;
ret = 0;
break;
@@ -1110,7 +1107,7 @@ int scoutfs_lock_ino(struct super_block *sb, enum scoutfs_lock_mode mode, int fl
end.sk_zone = SCOUTFS_FS_ZONE;
end.ski_ino = cpu_to_le64(ino | SCOUTFS_LOCK_INODE_GROUP_MASK);
return lock_key_range(sb, mode, flags, &start, &end, ino, ret_lock);
return lock_key_range(sb, mode, flags, &start, &end, ret_lock);
}
/*
@@ -1240,7 +1237,7 @@ int scoutfs_lock_rename(struct super_block *sb, enum scoutfs_lock_mode mode, int
.sk_type = SCOUTFS_RENAME_TYPE,
};
return lock_key_range(sb, mode, flags, &key, &key, 0, lock);
return lock_key_range(sb, mode, flags, &key, &key, lock);
}
/*
@@ -1288,7 +1285,7 @@ int scoutfs_lock_inode_index(struct super_block *sb, enum scoutfs_lock_mode mode
scoutfs_lock_get_index_item_range(type, major, ino, &start, &end);
return lock_key_range(sb, mode, 0, &start, &end, ino, ret_lock);
return lock_key_range(sb, mode, 0, &start, &end, ret_lock);
}
/*
@@ -1315,7 +1312,7 @@ int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int
end.sko_ino = cpu_to_le64(U64_MAX);
end.sk_type = SCOUTFS_ORPHAN_TYPE;
return lock_key_range(sb, mode, flags, &start, &end, ino, lock);
return lock_key_range(sb, mode, flags, &start, &end, lock);
}
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
@@ -1326,7 +1323,7 @@ int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode,
scoutfs_totl_set_range(&start, &end);
return lock_key_range(sb, mode, flags, &start, &end, 0, lock);
return lock_key_range(sb, mode, flags, &start, &end, lock);
}
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
@@ -1337,7 +1334,7 @@ int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode,
scoutfs_xattr_indx_get_range(&start, &end);
return lock_key_range(sb, mode, flags, &start, &end, 0, lock);
return lock_key_range(sb, mode, flags, &start, &end, lock);
}
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
@@ -1348,7 +1345,7 @@ int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int
scoutfs_quota_get_lock_range(&start, &end);
return lock_key_range(sb, mode, flags, &start, &end, 0, lock);
return lock_key_range(sb, mode, flags, &start, &end, lock);
}
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock, enum scoutfs_lock_mode mode)
@@ -1465,7 +1462,7 @@ static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
struct scoutfs_lock *lock =
container_of(ent, struct scoutfs_lock, tseq_entry);
seq_printf(m, "start "SK_FMT" end "SK_FMT" refresh_gen %llu mode %d waiters: rd %u wr %u wo %u users: rd %u wr %u wo %u ino: rd %llu wr %llu wo %llu pid: rd %d wr %d wo %d\n",
seq_printf(m, "start "SK_FMT" end "SK_FMT" refresh_gen %llu mode %d waiters: rd %u wr %u wo %u users: rd %u wr %u wo %u\n",
SK_ARG(&lock->start), SK_ARG(&lock->end),
lock->refresh_gen, lock->mode,
lock->waiters[SCOUTFS_LOCK_READ],
@@ -1473,13 +1470,7 @@ static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
lock->waiters[SCOUTFS_LOCK_WRITE_ONLY],
lock->users[SCOUTFS_LOCK_READ],
lock->users[SCOUTFS_LOCK_WRITE],
lock->users[SCOUTFS_LOCK_WRITE_ONLY],
lock->last_user_ino[SCOUTFS_LOCK_READ],
lock->last_user_ino[SCOUTFS_LOCK_WRITE],
lock->last_user_ino[SCOUTFS_LOCK_WRITE_ONLY],
lock->last_user_pid[SCOUTFS_LOCK_READ],
lock->last_user_pid[SCOUTFS_LOCK_WRITE],
lock->last_user_pid[SCOUTFS_LOCK_WRITE_ONLY]);
lock->users[SCOUTFS_LOCK_WRITE_ONLY]);
}
/*

View File

@@ -42,8 +42,6 @@ struct scoutfs_lock {
enum scoutfs_lock_mode invalidating_mode;
unsigned int waiters[SCOUTFS_LOCK_NR_MODES];
unsigned int users[SCOUTFS_LOCK_NR_MODES];
pid_t last_user_pid[SCOUTFS_LOCK_NR_MODES];
u64 last_user_ino[SCOUTFS_LOCK_NR_MODES];
struct scoutfs_tseq_entry tseq_entry;

View File

@@ -525,7 +525,7 @@ static int process_response(struct scoutfs_net_connection *conn,
struct super_block *sb = conn->sb;
struct message_send *msend;
scoutfs_net_response_t resp_func = NULL;
void *resp_data = NULL;
void *resp_data;
spin_lock(&conn->lock);
@@ -804,7 +804,7 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
if (invalid_message(conn, nh)) {
scoutfs_inc_counter(sb, net_recv_invalid_message);
ret = -EBADMSG;
goto out;
break;
}
data_len = le16_to_cpu(nh->data_len);

View File

@@ -1114,7 +1114,6 @@ int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
goto release;
}
wait_event(qtinf->waitq, !ruleset_is_busy(qtinf));
scoutfs_quota_invalidate(sb);
ret = 0;
@@ -1143,17 +1142,12 @@ void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key
}
/*
* Mark the cached ruleset invalid and free the previous one once readers
* drain. Called from cluster lock invalidation and from quota rule
* modification.
*
* Cluster lock invalidation runs only after the lock layer has drained
* local READ users. Since EBUSY is set only while a reader holds READ,
* the reader has already published by the time we run.
*
* Quota rule modification waits on the waitq for any in-flight reader
* to publish before calling here, so the next check rebuilds against
* the newly written rules rather than the reader's stale result.
* This is called during cluster lock invalidation to indicate that the
* ruleset is no longer protected by cluster locking and might have been
* modified. We mark the ruleset invalid and free it once all readers
* drain. The next check will acquire the cluster lock and read the
* rules. Because this is called during invalidation this is serialized
* with write holders of cluster locks so we can never see -EBUSY here.
*/
void scoutfs_quota_invalidate(struct super_block *sb)
{
@@ -1167,10 +1161,13 @@ void scoutfs_quota_invalidate(struct super_block *sb)
spin_lock(&qtinf->lock);
rs = rcu_dereference_protected(qtinf->ruleset, lockdep_is_held(&qtinf->lock));
if (rs == ERR_PTR(-ENOENT) || !IS_ERR(rs))
if (rs != ERR_PTR(-EINVAL))
rcu_assign_pointer(qtinf->ruleset, ERR_PTR(-EINVAL));
spin_unlock(&qtinf->lock);
/* cluster locking should have prevented this */
BUG_ON(rs == ERR_PTR(-EBUSY));
if (!IS_ERR(rs))
call_rcu(&rs->rcu, free_ruleset_rcu);

View File

@@ -2620,24 +2620,60 @@ TRACE_EVENT(scoutfs_block_dirty_ref,
);
TRACE_EVENT(scoutfs_get_file_block,
TP_PROTO(struct super_block *sb, u64 blkno, int flags),
TP_PROTO(struct super_block *sb, u64 blkno, int flags,
struct scoutfs_srch_block *srb),
TP_ARGS(sb, blkno, flags),
TP_ARGS(sb, blkno, flags, srb),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, blkno)
__field(__u32, entry_nr)
__field(__u32, entry_bytes)
__field(int, flags)
__field(__u64, first_hash)
__field(__u64, first_ino)
__field(__u64, first_id)
__field(__u64, last_hash)
__field(__u64, last_ino)
__field(__u64, last_id)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->blkno = blkno;
__entry->entry_nr = __le32_to_cpu(srb->entry_nr);
__entry->entry_bytes = __le32_to_cpu(srb->entry_bytes);
__entry->flags = flags;
__entry->first_hash = __le64_to_cpu(srb->first.hash);
__entry->first_ino = __le64_to_cpu(srb->first.ino);
__entry->first_id = __le64_to_cpu(srb->first.id);
__entry->last_hash = __le64_to_cpu(srb->last.hash);
__entry->last_ino = __le64_to_cpu(srb->last.ino);
__entry->last_id = __le64_to_cpu(srb->last.id);
),
TP_printk(SCSBF" blkno %llu flags 0x%x",
SCSB_TRACE_ARGS, __entry->blkno, __entry->flags)
TP_printk(SCSBF" blkno %llu nr %u bytes %u flags 0x%x first_hash 0x%llx first_ino %llu first_id 0x%llx last_hash 0x%llx last_ino %llu last_id 0x%llx",
SCSB_TRACE_ARGS, __entry->blkno, __entry->entry_nr,
__entry->entry_bytes, __entry->flags,
__entry->first_hash, __entry->first_ino, __entry->first_id,
__entry->last_hash, __entry->last_ino, __entry->last_id)
);
TRACE_EVENT(scoutfs_srch_new_merge,
TP_PROTO(struct super_block *sb),
TP_ARGS(sb),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
),
TP_printk(SCSBF, SCSB_TRACE_ARGS)
);
TRACE_EVENT(scoutfs_block_stale,

View File

@@ -638,7 +638,7 @@ static void scoutfs_server_commit_func(struct work_struct *work)
ret = scoutfs_alloc_empty_list(sb, &server->alloc, &server->wri,
server->meta_freed,
server->other_freed);
if (ret) {
if (ret && ret != -ENOLINK) {
scoutfs_err(sb, "server error emptying freed: %d", ret);
goto out;
}
@@ -1077,7 +1077,8 @@ static int next_log_merge_range(struct super_block *sb, struct scoutfs_btree_roo
struct scoutfs_key key;
int ret;
init_log_merge_key(&key, SCOUTFS_LOG_MERGE_RANGE_ZONE, 0, 0);
key = *start;
key.sk_zone = SCOUTFS_LOG_MERGE_RANGE_ZONE;
scoutfs_key_set_ones(&rng->start);
do {

View File

@@ -443,7 +443,7 @@ out:
sfl->blocks = cpu_to_le64(blk + 1);
if (bl) {
trace_scoutfs_get_file_block(sb, bl->blkno, flags);
trace_scoutfs_get_file_block(sb, bl->blkno, flags, bl->data);
}
*bl_ret = bl;
@@ -1525,6 +1525,65 @@ static bool should_commit(struct super_block *sb, struct scoutfs_alloc *alloc,
scoutfs_alloc_meta_low(sb, alloc, nr);
}
static int alloc_srch_block(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_srch_file *sfl,
struct scoutfs_block **bl,
u64 blk)
{
DECLARE_SRCH_INFO(sb, srinf);
int ret;
if (atomic_read(&srinf->shutdown))
return -ESHUTDOWN;
/* could grow and dirty to a leaf */
if (should_commit(sb, alloc, wri, sfl->height + 1))
return -EAGAIN;
ret = get_file_block(sb, alloc, wri, sfl, GFB_INSERT | GFB_DIRTY,
blk, bl);
if (ret < 0)
return ret;
scoutfs_inc_counter(sb, srch_compact_dirty_block);
return 0;
}
static int emit_srch_entry(struct super_block *sb,
struct scoutfs_srch_file *sfl,
struct scoutfs_srch_block *srb,
struct scoutfs_srch_entry *sre,
u64 blk)
{
int ret;
ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
sre, &srb->tail);
if (WARN_ON_ONCE(ret <= 0)) {
/* shouldn't happen */
return -EIO;
}
if (srb->entry_bytes == 0) {
if (blk == 0)
sfl->first = *sre;
srb->first = *sre;
}
le32_add_cpu(&srb->entry_nr, 1);
le32_add_cpu(&srb->entry_bytes, ret);
srb->last = *sre;
srb->tail = *sre;
sfl->last = *sre;
le64_add_cpu(&sfl->entries, 1);
scoutfs_inc_counter(sb, srch_compact_entry);
return 0;
}
struct tourn_node {
struct scoutfs_srch_entry sre;
int ind;
@@ -1547,7 +1606,8 @@ static void tourn_update(struct tourn_node *tnodes, struct tourn_node *tn)
}
/* return the entry at the current position, can return enoent if done */
typedef int (*kway_get_t)(struct super_block *sb,
typedef int (*kway_get_t)(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_srch_entry *sre_ret, void *arg);
/* only called after _get returns 0, advances to next entry for _get */
typedef void (*kway_advance_t)(struct super_block *sb, void *arg);
@@ -1559,20 +1619,18 @@ static int kway_merge(struct super_block *sb,
kway_get_t kway_get, kway_advance_t kway_adv,
void **args, int nr, bool logs_input)
{
DECLARE_SRCH_INFO(sb, srinf);
struct scoutfs_srch_block *srb = NULL;
struct scoutfs_srch_entry last_tail;
struct scoutfs_srch_entry tmp_entry = {0};
struct scoutfs_block *bl = NULL;
struct tourn_node *tnodes;
struct tourn_node *leaves;
struct tourn_node *root;
struct tourn_node *tn;
int last_bytes = 0;
bool have_tmp = false;
int nr_parents;
int nr_nodes;
int empty = 0;
int ret = 0;
int diff;
u64 blk;
int ind;
int i;
@@ -1596,7 +1654,7 @@ static int kway_merge(struct super_block *sb,
for (i = 0; i < nr; i++) {
tn = &leaves[i];
tn->ind = i;
ret = kway_get(sb, &tn->sre, args[i]);
ret = kway_get(sb, NULL, NULL, &tn->sre, args[i]);
if (ret == 0) {
tourn_update(tnodes, &leaves[i]);
} else if (ret == -ENOENT) {
@@ -1606,97 +1664,68 @@ static int kway_merge(struct super_block *sb,
}
}
trace_scoutfs_srch_new_merge(sb);
/* always append new blocks */
blk = le64_to_cpu(sfl->blocks);
while (empty < nr) {
if (bl == NULL) {
if (atomic_read(&srinf->shutdown)) {
ret = -ESHUTDOWN;
goto out;
if (sre_cmp(&root->sre, &tmp_entry) != 0) {
if (have_tmp) {
if (bl == NULL) {
ret = alloc_srch_block(sb, alloc, wri,
sfl, &bl, blk);
if (ret < 0)
goto out;
srb = bl->data;
}
ret = emit_srch_entry(sb, sfl, srb, &tmp_entry,
blk);
if (ret < 0)
goto out;
if (le32_to_cpu(srb->entry_bytes) >
SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
scoutfs_block_put(sb, bl);
bl = NULL;
blk++;
memset(&tmp_entry, 0, sizeof(tmp_entry));
have_tmp = false;
continue;
}
/*
* end sorted block on _SAFE offset for
* testing
*/
if (bl && le32_to_cpu(srb->entry_nr) == 1 &&
logs_input &&
scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
pad_entries_at_safe(sfl, srb);
scoutfs_block_put(sb, bl);
bl = NULL;
blk++;
memset(&tmp_entry, 0, sizeof(tmp_entry));
have_tmp = false;
continue;
}
}
/* could grow and dirty to a leaf */
if (should_commit(sb, alloc, wri, sfl->height + 1)) {
ret = 0;
goto out;
}
ret = get_file_block(sb, alloc, wri, sfl,
GFB_INSERT | GFB_DIRTY, blk, &bl);
if (ret < 0)
goto out;
srb = bl->data;
scoutfs_inc_counter(sb, srch_compact_dirty_block);
}
if (sre_cmp(&root->sre, &srb->last) != 0) {
last_bytes = le32_to_cpu(srb->entry_bytes);
last_tail = srb->last;
ret = encode_entry(srb->entries +
le32_to_cpu(srb->entry_bytes),
&root->sre, &srb->tail);
if (WARN_ON_ONCE(ret <= 0)) {
/* shouldn't happen */
ret = -EIO;
goto out;
}
if (srb->entry_bytes == 0) {
if (blk == 0)
sfl->first = root->sre;
srb->first = root->sre;
}
le32_add_cpu(&srb->entry_nr, 1);
le32_add_cpu(&srb->entry_bytes, ret);
srb->last = root->sre;
srb->tail = root->sre;
sfl->last = root->sre;
le64_add_cpu(&sfl->entries, 1);
ret = 0;
if (le32_to_cpu(srb->entry_bytes) >
SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
scoutfs_block_put(sb, bl);
bl = NULL;
blk++;
}
/* end sorted block on _SAFE offset for testing */
if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
pad_entries_at_safe(sfl, srb);
scoutfs_block_put(sb, bl);
bl = NULL;
blk++;
}
scoutfs_inc_counter(sb, srch_compact_entry);
tmp_entry = root->sre;
have_tmp = true;
} else {
/*
* Duplicate entries indicate deletion so we
* undo the previously encoded entry and ignore
* undo the previously cached tmp entry and ignore
* this entry. This only happens within each
* block. Deletions can span block boundaries
* and will be filtered out by search and
* hopefully removed in future compactions.
*/
diff = le32_to_cpu(srb->entry_bytes) - last_bytes;
if (diff) {
memset(srb->entries + last_bytes, 0, diff);
if (srb->entry_bytes == 0) {
/* last_tail will be 0 */
if (blk == 0)
sfl->first = last_tail;
srb->first = last_tail;
}
le32_add_cpu(&srb->entry_nr, -1);
srb->entry_bytes = cpu_to_le32(last_bytes);
srb->last = last_tail;
srb->tail = last_tail;
sfl->last = last_tail;
le64_add_cpu(&sfl->entries, -1);
}
memset(&tmp_entry, 0, sizeof(tmp_entry));
have_tmp = false;
scoutfs_inc_counter(sb, srch_compact_removed_entry);
}
@@ -1705,7 +1734,7 @@ static int kway_merge(struct super_block *sb,
ind = root->ind;
tn = &leaves[ind];
kway_adv(sb, args[ind]);
ret = kway_get(sb, &tn->sre, args[ind]);
ret = kway_get(sb, alloc, wri, &tn->sre, args[ind]);
if (ret == -ENOENT) {
/* this index is done */
memset(&tn->sre, 0xff, sizeof(tn->sre));
@@ -1739,6 +1768,21 @@ static int kway_merge(struct super_block *sb,
/* could stream a final index.. arguably a small portion of work */
out:
if (have_tmp) {
bool emit = true;
if (bl == NULL) {
ret = alloc_srch_block(sb, alloc, wri, sfl, &bl, blk);
if (ret)
emit = false;
else
srb = bl->data;
}
if (emit)
ret = emit_srch_entry(sb, sfl, srb, &tmp_entry, blk);
}
scoutfs_block_put(sb, bl);
vfree(tnodes);
return ret;
@@ -1751,7 +1795,8 @@ static struct scoutfs_srch_entry *page_priv_sre(struct page *page)
return (struct scoutfs_srch_entry *)page_address(page) + page->private;
}
static int kway_get_page(struct super_block *sb,
static int kway_get_page(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_srch_entry *sre_ret, void *arg)
{
struct page *page = arg;
@@ -1958,7 +2003,8 @@ struct kway_file_reader {
int decoded_bytes;
};
static int kway_get_reader(struct super_block *sb,
static int kway_get_reader(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_srch_entry *sre_ret, void *arg)
{
struct kway_file_reader *rdr = arg;
@@ -1969,6 +2015,17 @@ static int kway_get_reader(struct super_block *sb,
return -ENOENT;
if (rdr->bl == NULL) {
/*
* Each new output block has the possibility of winding up with
* a straggler entry due to our need to look ahead an entry so
* that we don't wind up emitting an empty block.
*
* Make sure there's space to emit the straggler before starting
* another input block.
*/
if (alloc && should_commit(sb, alloc, wri, 16))
return -ENOENT;
ret = get_file_block(sb, NULL, NULL, rdr->sfl, 0, rdr->blk,
&rdr->bl);
if (ret < 0)
@@ -1982,6 +2039,11 @@ static int kway_get_reader(struct super_block *sb,
rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
/* XXX inconsistency */
scoutfs_err(sb, "blkno %llu pos %u vs %ld, skip %u, bytes %u",
__le64_to_cpu(srb->hdr.blkno),
rdr->pos, SCOUTFS_SRCH_BLOCK_SAFE_BYTES,
rdr->skip,
le32_to_cpu(srb->entry_bytes));
return -EIO;
}

1
tests/.gitignore vendored
View File

@@ -12,4 +12,3 @@ src/o_tmpfile_umask
src/o_tmpfile_linkat
src/mmap_stress
src/mmap_validate
src/totl-delta-inject

View File

@@ -15,8 +15,7 @@ BIN := src/createmany \
src/o_tmpfile_umask \
src/o_tmpfile_linkat \
src/mmap_stress \
src/mmap_validate \
src/totl-delta-inject
src/mmap_validate
DEPS := $(wildcard src/*.d)

View File

@@ -1,6 +0,0 @@
== set up file
== exercise read, write, and write-only modes
== verify FS-zone lock recorded read and write ino+pid
== verify orphan-zone lock recorded write-only ino+pid
== contend on a single inode with concurrent read and write loops
== verify both rd and wr slots populated by concurrent contention

View File

@@ -1,6 +0,0 @@
== setup
== concurrent quota mod and check across mounts
== verify quota rules are consistent after race
== verify file creation still works under quota
file visible on mount 1
== cleanup

View File

@@ -1,37 +1,7 @@
== initialize per-mount values
== arm compaction triggers
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_merge_stop_safe armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_merge_stop_safe armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_merge_stop_safe armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_merge_stop_safe armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_merge_stop_safe armed: 1
== compact more often
== create padded sorted inputs by forcing log rotation
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_compact_logs_pad_safe armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_force_log_rotate armed: 1
trigger srch_compact_logs_pad_safe armed: 1
== compaction of padded should stop at safe
== verify no compaction errors
== cleanup

View File

@@ -1,10 +0,0 @@
== setup three files contributing to totl 8888.0.0
== merge baseline into fs_root
8888.0.0 = 42, 3
== inject (+128, +2) unbalances totl 8888.0.0
8888.0.0 = 170, 5
== unlink f3 (value 32) produces a -32/-1 delta
8888.0.0 = 138, 4
== inject (-128, -2) restores accounting for the remaining files
8888.0.0 = 10, 2
== cleanup

View File

@@ -90,7 +90,7 @@ done
# set some T_ defaults
T_TRACE_DUMP="0"
T_TRACE_PRINTK="0"
T_TRACE_PRINTK=""
T_PORT_START="19700"
T_LOOP_ITER="1"
@@ -137,6 +137,9 @@ while true; do
test -n "$2" || die "-l must have a nr iterations argument"
test "$2" -eq "$2" 2>/dev/null || die "-l <nr> argument must be an integer"
T_LOOP_ITER="$2"
# when looping, break after first failure
T_ABORT="1"
shift
;;
-M)
@@ -399,31 +402,44 @@ if [ -n "$T_INSMOD" ]; then
cmd insmod "$T_MODULE"
fi
if [ -n "$T_TRACE_MULT" ]; then
# orig_trace_size=$(cat /sys/kernel/debug/tracing/buffer_size_kb)
orig_trace_size=1408
mult_trace_size=$((orig_trace_size * T_TRACE_MULT))
msg "increasing trace buffer size from $orig_trace_size KiB to $mult_trace_size KiB"
echo $mult_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
fi
start_tracing() {
if [ -n "$T_TRACE_MULT" ]; then
orig_trace_size=1408
mult_trace_size=$((orig_trace_size * T_TRACE_MULT))
msg "increasing trace buffer size from $orig_trace_size KiB to $mult_trace_size KiB"
echo $mult_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
fi
nr_globs=${#T_TRACE_GLOB[@]}
if [ $nr_globs -gt 0 ]; then
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
nr_globs=${#T_TRACE_GLOB[@]}
if [ $nr_globs -gt 0 ]; then
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
for g in "${T_TRACE_GLOB[@]}"; do
for e in /sys/kernel/debug/tracing/events/scoutfs/$g/enable; do
if test -w "$e"; then
echo 1 > "$e"
else
die "-t glob '$g' matched no scoutfs events"
fi
for g in "${T_TRACE_GLOB[@]}"; do
for e in /sys/kernel/debug/tracing/events/scoutfs/$g/enable; do
if test -w "$e"; then
echo 1 > "$e"
else
die "-t glob '$g' matched no scoutfs events"
fi
done
done
done
nr_events=$(cat /sys/kernel/debug/tracing/set_event | wc -l)
msg "enabled $nr_events trace events from $nr_globs -t globs"
fi
nr_events=$(cat /sys/kernel/debug/tracing/set_event | wc -l)
msg "enabled $nr_events trace events from $nr_globs -t globs"
fi
}
stop_tracing() {
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
msg "saving traces and disabling tracing"
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
echo 0 > /sys/kernel/debug/tracing/options/trace_printk
cat /sys/kernel/debug/tracing/trace | gzip > "$T_RESULTS/traces.gz"
if [ -n "$orig_trace_size" ]; then
echo $orig_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
fi
fi
}
if [ -n "$T_TRACE_PRINTK" ]; then
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
@@ -603,24 +619,26 @@ passed=0
skipped=0
failed=0
skipped_permitted=0
for t in $tests; do
# tests has basenames from sequence, get path and name
t="tests/$t"
test_name=$(basename "$t" | sed -e 's/.sh$//')
for iter in $(seq 1 $T_LOOP_ITER); do
# get stats from previous pass
last="$T_RESULTS/last-passed-test-stats"
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
test -n "$stats" && stats="last: $stats"
printf " %-30s $stats" "$test_name"
start_tracing
# mark in dmesg as to what test we are running
echo "run scoutfs test $test_name" > /dev/kmsg
for t in $tests; do
# tests has basenames from sequence, get path and name
t="tests/$t"
test_name=$(basename "$t" | sed -e 's/.sh$//')
# let the test get at its extra files
T_EXTRA="$T_TESTS/extra/$test_name"
# get stats from previous pass
last="$T_RESULTS/last-passed-test-stats"
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
test -n "$stats" && stats="last: $stats"
printf " %-30s $stats" "$test_name"
for iter in $(seq 1 $T_LOOP_ITER); do
# mark in dmesg as to what test we are running
echo "run scoutfs test $test_name" > /dev/kmsg
# let the test get at its extra files
T_EXTRA="$T_TESTS/extra/$test_name"
# create a temporary dir and file path for the test
T_TMPDIR="$T_RESULTS/tmp/$test_name"
@@ -710,55 +728,43 @@ for t in $tests; do
sts=$T_FAIL_STATUS
fi
# stop looping if we didn't pass
if [ "$sts" != "$T_PASS_STATUS" ]; then
break;
# show and record the result of the test
if [ "$sts" == "$T_PASS_STATUS" ]; then
echo " passed: $stats"
((passed++))
# save stats for passed test
grep -s -v "^$test_name " "$last" > "$last.tmp"
echo "$test_name $stats" >> "$last.tmp"
mv -f "$last.tmp" "$last"
elif [ "$sts" == "$T_SKIP_PERMITTED_STATUS" ]; then
echo " [ skipped (permitted): $message ]"
echo "$test_name skipped (permitted) $message " >> "$T_RESULTS/skip.log"
((skipped_permitted++))
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
echo " [ skipped: $message ]"
echo "$test_name $message" >> "$T_RESULTS/skip.log"
((skipped++))
elif [ "$sts" == "$T_FAIL_STATUS" ]; then
echo " [ failed: $message ]"
echo "$test_name $message" >> "$T_RESULTS/fail.log"
((failed++))
if [ -n "$T_ABORT" ]; then
stop_tracing
die "aborting after first failure"
fi
fi
# record results for TAP format output
t_tap_progress $test_name $sts
((testcount++))
done
# show and record the result of the test
if [ "$sts" == "$T_PASS_STATUS" ]; then
echo " passed: $stats"
((passed++))
# save stats for passed test
grep -s -v "^$test_name " "$last" > "$last.tmp"
echo "$test_name $stats" >> "$last.tmp"
mv -f "$last.tmp" "$last"
elif [ "$sts" == "$T_SKIP_PERMITTED_STATUS" ]; then
echo " [ skipped (permitted): $message ]"
echo "$test_name skipped (permitted) $message " >> "$T_RESULTS/skip.log"
((skipped_permitted++))
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
echo " [ skipped: $message ]"
echo "$test_name $message" >> "$T_RESULTS/skip.log"
((skipped++))
elif [ "$sts" == "$T_FAIL_STATUS" ]; then
echo " [ failed: $message ]"
echo "$test_name $message" >> "$T_RESULTS/fail.log"
((failed++))
test -n "$T_ABORT" && die "aborting after first failure"
fi
# record results for TAP format output
t_tap_progress $test_name $sts
((testcount++))
stop_tracing
done
msg "all tests run: $passed passed, $skipped skipped, $skipped_permitted skipped (permitted), $failed failed"
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
msg "saving traces and disabling tracing"
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
echo 0 > /sys/kernel/debug/tracing/options/trace_printk
cat /sys/kernel/debug/tracing/trace > "$T_RESULTS/traces"
if [ -n "$orig_trace_size" ]; then
echo $orig_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
fi
fi
if [ "$skipped" == 0 -a "$failed" == 0 ]; then
msg "all tests passed"
unmount_all

View File

@@ -29,10 +29,7 @@ totl-xattr-tag.sh
basic-xattr-indx.sh
quota.sh
totl-merge-read.sh
quota-invalidate-race.sh
totl-delta-inject.sh
lock-refleak.sh
lock-pid-ino.sh
lock-shrink-consistency.sh
lock-shrink-read-race.sh
lock-pr-cw-conflict.sh

View File

@@ -1,121 +0,0 @@
/*
* Test helper that calls SCOUTFS_IOC_INJECT_TOTL_DELTA to seed
* arbitrary totl deltas.
*
* Copyright (C) 2026 Versity Software, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"
/*
 * Print the command synopsis to stderr and terminate with exit status
 * 2, distinct from the runtime-failure status of 1 used by main().
 */
static void usage(const char *prog)
{
	const char *fmt = "Usage: %s <mountpoint> <a>.<b>.<c> <total> <count>\n";

	fprintf(stderr, fmt, prog);
	exit(2);
}
/*
 * Parse the whole of string s as a base-0 (decimal, octal, or hex)
 * signed 64-bit integer.  Returns 0 and stores the value in *out on
 * success; returns -1 if s is empty, has trailing junk, or the value
 * is out of range (strtoll sets errno to ERANGE).
 */
static int parse_s64(const char *s, int64_t *out)
{
	char *tail = NULL;
	long long val;

	errno = 0;
	val = strtoll(s, &tail, 0);
	if (tail == s || *tail != '\0' || errno != 0)
		return -1;

	*out = val;
	return 0;
}
/*
 * Parse "<a>.<b>.<c>" into abc[0..2] (skxt_a, skxt_b, skxt_c).  Each
 * component must be a non-empty unsigned base-0 integer.  Returns 0 on
 * success, -1 on any parse error.
 *
 * Each component must begin with a digit.  Without that check,
 * strtoull() would accept leading whitespace or a sign: "-1" parses
 * successfully and wraps to a huge unsigned value, which violates the
 * "unsigned" contract above.  A digit-first check still allows the
 * "0x"/"0" base prefixes, since both start with '0'.
 */
static int parse_dotted_name(const char *s, uint64_t abc[3])
{
	const char *p = s;
	char *end;
	int i;

	for (i = 0; i < 3; i++) {
		/* reject empty components, signs, and leading whitespace */
		if (*p < '0' || *p > '9')
			return -1;
		errno = 0;
		abc[i] = strtoull(p, &end, 0);
		if (errno || end == p)
			return -1;
		if (i < 2) {
			/* components are separated by single dots */
			if (*end != '.')
				return -1;
			p = end + 1;
		} else {
			/* the final component must consume the rest of s */
			if (*end != '\0')
				return -1;
		}
	}
	return 0;
}
/*
 * Open the mountpoint, fill the inject-totl-delta ioctl arguments from
 * the command line, and fire the ioctl.  Exits 2 on a usage error, 1 on
 * a runtime failure, 0 on success.
 */
int main(int argc, char **argv)
{
	struct scoutfs_ioctl_inject_totl_delta itd = {{0,}};
	uint64_t abc[3];
	int64_t total;
	int64_t count;
	int ret = 1;
	int fd;

	if (argc != 5)
		usage(argv[0]);

	/* argv: [1] mountpoint, [2] dotted name, [3] total, [4] count */
	if (parse_dotted_name(argv[2], abc) ||
	    parse_s64(argv[3], &total) ||
	    parse_s64(argv[4], &count)) {
		fprintf(stderr, "could not parse arguments\n");
		usage(argv[0]);
	}

	itd.name[0] = abc[0];
	itd.name[1] = abc[1];
	itd.name[2] = abc[2];
	itd.total = total;
	itd.count = count;

	fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (fd < 0) {
		fprintf(stderr, "open(%s): %s\n", argv[1], strerror(errno));
		return 1;
	}

	if (ioctl(fd, SCOUTFS_IOC_INJECT_TOTL_DELTA, &itd) < 0) {
		fprintf(stderr,
			"INJECT_TOTL_DELTA(%" PRIu64 ".%" PRIu64 ".%" PRIu64
			", total=%" PRId64 ", count=%" PRId64 "): %s\n",
			abc[0], abc[1], abc[2], total, count, strerror(errno));
	} else {
		ret = 0;
	}

	close(fd);
	return ret;
}

View File

@@ -1,68 +0,0 @@
#
# verify debugfs client_locks reports per-mode last-user PID and inode.
#
# Exercises read (stat), write (redirect), and write-only (unlink) lock
# users over a file, then parses the matching client_locks lines with
# awk to check that each mode recorded the file's inode and a real PID.
#
t_require_commands stat touch awk rm
FILE="$T_D0/file"
echo "== set up file"
touch "$FILE"
INO=$(stat -c %i "$FILE")
# FS-zone locks cover 1024-inode groups; mask down to the group start
GROUP_START=$(( INO & ~1023 ))
echo "== exercise read, write, and write-only modes"
t_quiet stat "$FILE"
echo data > "$FILE"
rm -f "$FILE"
echo "== verify FS-zone lock recorded read and write ino+pid"
# NOTE(review): the awk field positions ($25/$32 for read, $27/$34 for
# write) assume a fixed client_locks column layout -- confirm against
# the debugfs output format if this check starts failing.
ERR=$(awk -v group="$GROUP_START" -v ino="$INO" '
$2 == "16." group ".0.0.0.0" {
if ($25 != ino || $32 <= 0)
print "read mode: ino=" $25 " pid=" $32 " want ino=" ino " pid>0"
if ($27 != ino || $34 <= 0)
print "write mode: ino=" $27 " pid=" $34 " want ino=" ino " pid>0"
found = 1
}
END { if (!found) print "no FS-zone client_locks line for group " group }
' < "$(t_debugfs_path)/client_locks")
[ -n "$ERR" ] && t_fail "$ERR"
echo "== verify orphan-zone lock recorded write-only ino+pid"
# the unlink above should have used the orphan-zone lock in
# write-only mode; fields $29/$36 are the write-only ino/pid slots
ERR=$(awk -v ino="$INO" '
$2 == "8.0.4.0.0.0" {
if ($29 != ino || $36 <= 0)
print "write-only mode: ino=" $29 " pid=" $36 " want ino=" ino " pid>0"
found = 1
}
END { if (!found) print "no orphan-zone client_locks line" }
' < "$(t_debugfs_path)/client_locks")
[ -n "$ERR" ] && t_fail "$ERR"
echo "== contend on a single inode with concurrent read and write loops"
FILE2="$T_D0/file2"
touch "$FILE2"
INO2=$(stat -c %i "$FILE2")
GROUP2=$(( INO2 & ~1023 ))
# run the read and write loops in parallel so the lock sees both
# modes under contention rather than back to back
for i in $(seq 1 5); do t_quiet stat "$FILE2"; done &
RPID=$!
for i in $(seq 1 5); do echo $i > "$FILE2"; done &
WPID=$!
wait $RPID $WPID
echo "== verify both rd and wr slots populated by concurrent contention"
ERR=$(awk -v group="$GROUP2" -v ino="$INO2" '
$2 == "16." group ".0.0.0.0" {
if ($25 != ino || $32 <= 0)
print "concurrent read: ino=" $25 " pid=" $32 " want ino=" ino " pid>0"
if ($27 != ino || $34 <= 0)
print "concurrent write: ino=" $27 " pid=" $34 " want ino=" ino " pid>0"
found = 1
}
END { if (!found) print "no FS-zone client_locks line for group " group }
' < "$(t_debugfs_path)/client_locks")
[ -n "$ERR" ] && t_fail "$ERR"
t_pass

View File

@@ -1,70 +0,0 @@
#
# Regression for the BUG_ON in scoutfs_quota_invalidate when a concurrent
# ruleset read on one mount races with a quota rule modification.
#
# Three loops run in parallel: one adds/deletes quota rules on mount 0,
# one creates/removes files on mount 0 (racing a local ruleset read
# against the invalidate), and one does the same on mount 1 to drive
# cross-node lock traffic.
#
t_require_mounts 2
TEST_UID=22222
SET_UID="--ruid=$TEST_UID --euid=$TEST_UID"
echo "== setup"
mkdir -p "$T_D0/dir"
chown --quiet $TEST_UID "$T_D0/dir"
# totl xattr gives quota checks something to consult
setfattr -n scoutfs.totl.test.1.1.1 -v 1 "$T_D0/dir"
echo "== concurrent quota mod and check across mounts"
# rule modifier: repeatedly add and delete a rule to force invalidation
(
for i in $(seq 1 20); do
scoutfs quota-add -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
scoutfs quota-del -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
done
) &
MOD_PID=$!
# same mount as the mod: races local read against invalidate
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D0/dir/race0_$i" 2>/dev/null
rm -f "$T_D0/dir/race0_$i"
done
) &
CHECK0_PID=$!
# other mount: drives cross-node lock traffic
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D1/dir/race1_$i" 2>/dev/null
rm -f "$T_D1/dir/race1_$i"
done
) &
CHECK1_PID=$!
t_quiet wait $MOD_PID
t_quiet wait $CHECK0_PID
t_quiet wait $CHECK1_PID
echo "== verify quota rules are consistent after race"
# wipe then list: list output should be empty if the rules survived
scoutfs quota-wipe -p "$T_M0"
scoutfs quota-list -p "$T_M0"
echo "== verify file creation still works under quota"
scoutfs quota-add -p "$T_M0" -r "1 1,L,- 1,L,- 1,L,- I 999999 -"
sync
# drop caches so the next quota check reads fresh items
echo 1 > $(t_debugfs_path)/drop_weak_item_cache
echo 1 > $(t_debugfs_path)/drop_quota_check_cache
setpriv $SET_UID touch "$T_D0/dir/verify_file"
# cross-mount visibility confirms the create was fully consistent
test -f "$T_D1/dir/verify_file" && echo "file visible on mount 1"
rm -f "$T_D0/dir/verify_file"
scoutfs quota-wipe -p "$T_M0"
echo "== cleanup"
setfattr -x scoutfs.totl.test.1.1.1 "$T_D0/dir"
rm -rf "$T_D0/dir"
t_pass

View File

@@ -31,8 +31,8 @@ trap restore_compact_delay EXIT
echo "== arm compaction triggers"
for nr in $(t_fs_nrs); do
t_trigger_arm srch_compact_logs_pad_safe $nr
t_trigger_arm srch_merge_stop_safe $nr
t_trigger_arm_silent srch_compact_logs_pad_safe $nr
t_trigger_arm_silent srch_merge_stop_safe $nr
done
echo "== compact more often"
@@ -44,11 +44,12 @@ echo "== create padded sorted inputs by forcing log rotation"
sv=$(t_server_nr)
for i in $(seq 1 $COMPACT_NR); do
for j in $(seq 1 $COMPACT_NR); do
t_trigger_arm srch_force_log_rotate $sv
seq -f "f-$i-$j-$SEQF" 1 10 | \
bulk_create_paths -X "scoutfs.srch.t-srch-safe-merge-pos" -d "$T_D0" > \
/dev/null
t_trigger_arm_silent srch_force_log_rotate $sv
sync
test "$(t_trigger_get srch_force_log_rotate $sv)" == "0" || \
@@ -59,7 +60,7 @@ for i in $(seq 1 $COMPACT_NR); do
while test $padded == 0 && sleep .5; do
for nr in $(t_fs_nrs); do
if [ "$(t_trigger_get srch_compact_logs_pad_safe $nr)" == "0" ]; then
t_trigger_arm srch_compact_logs_pad_safe $nr
t_trigger_arm_silent srch_compact_logs_pad_safe $nr
padded=1
break
fi

View File

@@ -1,43 +0,0 @@
#
# Exercise the SCOUTFS_IOC_INJECT_TOTL_DELTA ioctl that injects totl
# deltas directly via totl-delta-inject(1).
#
# Three xattrs establish a baseline total, then injected deltas
# unbalance and later restore the accounting; read_totals shows the
# running totals after each step for golden-output comparison.
#
t_require_commands setfattr scoutfs sync rm touch totl-delta-inject
# force a log merge then read-xattr-totals filtered to our own keys
read_totals()
{
t_force_log_merge
sync
# drop the weak item cache so the read sees merged fs_root items
echo 1 > $(t_debugfs_path)/drop_weak_item_cache
# only our 8888.* keys; "|| true" keeps an empty match from failing
scoutfs read-xattr-totals -p "$T_M0" | \
grep -E '^8888\.' || true
}
echo "== setup three files contributing to totl 8888.0.0"
touch "$T_D0/f1" "$T_D0/f2" "$T_D0/f3"
# distinct power-of-two values make each file's contribution visible
setfattr -n scoutfs.totl.inj.8888.0.0 -v 2 "$T_D0/f1"
setfattr -n scoutfs.totl.inj.8888.0.0 -v 8 "$T_D0/f2"
setfattr -n scoutfs.totl.inj.8888.0.0 -v 32 "$T_D0/f3"
echo "== merge baseline into fs_root"
read_totals
echo "== inject (+128, +2) unbalances totl 8888.0.0"
totl-delta-inject "$T_M0" 8888.0.0 128 2
read_totals
echo "== unlink f3 (value 32) produces a -32/-1 delta"
rm -f "$T_D0/f3"
read_totals
echo "== inject (-128, -2) restores accounting for the remaining files"
totl-delta-inject "$T_M0" 8888.0.0 -128 -2
read_totals
echo "== cleanup"
rm -f "$T_D0/f1" "$T_D0/f2"
read_totals
t_pass