From add1da10dc5cc4866df809377f386c31bdadef1c Mon Sep 17 00:00:00 2001 From: Auke Kok Date: Tue, 17 Mar 2026 12:27:57 -0700 Subject: [PATCH] Add test for stale seq in merge delta combining. merge_read_item() fails to update found->seq when combining delta items from multiple finalized log trees. Add a test case to replicate the conditions of this issue. Each of 5 mounts sets a totl value in its own 3-bit lane (powers of 8: 1, 8, 64, 512, 4096) on 2500 shared keys, giving an expected total of 4681 per key. Any bit set outside that expected pattern (or(total, 4681) != 4681) proves double-counting from a stale seq. The log_merge_force_partial trigger forces many partial merges per cycle, creating the conditions where stale-seq items get spliced into fs_root while finalized logs still exist. Parallel readers on all mounts race against this window to detect double-counted values. Signed-off-by: Auke Kok --- tests/golden/totl-merge-read | 3 ++ tests/sequence | 1 + tests/tests/totl-merge-read.sh | 50 ++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+) create mode 100644 tests/golden/totl-merge-read create mode 100644 tests/tests/totl-merge-read.sh diff --git a/tests/golden/totl-merge-read b/tests/golden/totl-merge-read new file mode 100644 index 00000000..931671e6 --- /dev/null +++ b/tests/golden/totl-merge-read @@ -0,0 +1,3 @@ +== setup +expected 4681 +== cleanup diff --git a/tests/sequence b/tests/sequence index e296553a..bdd8aba7 100644 --- a/tests/sequence +++ b/tests/sequence @@ -27,6 +27,7 @@ simple-xattr-unit.sh retention-basic.sh totl-xattr-tag.sh quota.sh +totl-merge-read.sh lock-refleak.sh lock-shrink-consistency.sh lock-shrink-read-race.sh diff --git a/tests/tests/totl-merge-read.sh b/tests/tests/totl-merge-read.sh new file mode 100644 index 00000000..8c37c93a --- /dev/null +++ b/tests/tests/totl-merge-read.sh @@ -0,0 +1,50 @@ +# +# Test that merge_read_item() correctly updates the sequence number when +# combining delta items from multiple finalized log trees. 
Each mount +# sets a totl value in its own 3-bit lane (powers of 8) so that any +# double-counting overflows the lane and is caught by: or(v, exp) != exp. +# + +t_require_commands setfattr scoutfs +t_require_mounts 5 + +echo "== setup" +for nr in $(t_fs_nrs); do + d=$(eval echo \$T_D$nr) + for i in $(seq 1 2500); do : > "$d/f$nr$i"; done +done +sync +t_force_log_merge + +vals=(1 8 64 512 4096) +expected=4681 +n=0 +for nr in $(t_fs_nrs); do + d=$(eval echo \$T_D$nr) + v=${vals[$((n++))]} + for i in $(seq 1 2500); do + setfattr -n "scoutfs.totl.t.$i.0.0" -v $v "$d/f$nr$i" + done +done + +t_trigger_arm_silent log_merge_force_partial $(t_server_nr) + +bad="$T_TMPDIR/bad" +for nr in $(t_fs_nrs); do + ( while true; do + echo 1 > "$(t_debugfs_path $nr)/drop_weak_item_cache" + scoutfs read-xattr-totals -p "$(eval echo \$T_M$nr)" | \ + awk -F'[ =,]+' -v e=$expected 'or($2+0,e) != e' + done ) >> "$bad" & +done + +echo "expected $expected" +t_force_log_merge +t_silent_kill $(jobs -p) +test -s "$bad" && echo "double-counted:" && cat "$bad" + +echo "== cleanup" +for nr in $(t_fs_nrs); do + find "$(eval echo \$T_D$nr)" -name "f$nr*" -delete +done +t_pass