From 701f1a9538ca2f52b6a70de80cd382fd6cab640c Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Tue, 15 Nov 2022 12:34:52 -0800
Subject: [PATCH] Add test that checks duplicate meta_seq entries

Add a quick test of the index items to make sure that rapid inode
updates don't create duplicate meta_seq items.

Signed-off-by: Zach Brown
---
 tests/golden/simple-inode-index   |  1 +
 tests/tests/simple-inode-index.sh | 35 +++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/tests/golden/simple-inode-index b/tests/golden/simple-inode-index
index 3b2036e8..57f77933 100644
--- a/tests/golden/simple-inode-index
+++ b/tests/golden/simple-inode-index
@@ -7,3 +7,4 @@ found second
 == changing metadata must increase meta seq
 == changing contents must increase data seq
 == make sure dirtying doesn't livelock walk
+== concurrent update attempts maintain single entries
diff --git a/tests/tests/simple-inode-index.sh b/tests/tests/simple-inode-index.sh
index 514a4348..414c0db3 100644
--- a/tests/tests/simple-inode-index.sh
+++ b/tests/tests/simple-inode-index.sh
@@ -103,4 +103,39 @@ while [ "$nr" -lt 100 ]; do
 	((nr++))
 done
 
+#
+# make sure rapid concurrent metadata updates don't create multiple
+# meta_seq entries
+#
+# we had a bug where deletion items created under concurrent_write locks
+# could get versions older than the items they're deleting which were
+# protected by read/write locks.
+#
+echo "== concurrent update attempts maintain single entries"
+FILES=4
+nr=1
+while [ "$nr" -lt 10 ]; do
+	# touch a bunch of files in parallel from all mounts
+	pids=()
+	for i in $(t_fs_nrs); do
+		eval path="\$T_D${i}"
+		seq -f "$path/file-%.0f" 1 $FILES | xargs touch &
+		pids+=($!)
+	done
+	# a bare "wait" always returns 0; wait on each pid so a failed
+	# background touch job actually fails the test
+	for pid in "${pids[@]}"; do
+		wait "$pid" || t_fail "concurrent file updates failed"
+	done
+
+	# make sure no inodes have duplicate entries
+	sync
+	scoutfs walk-inodes -p "$T_D0" meta_seq -- 0 -1 | \
+		grep -v "minor" | \
+		awk '{print $4}' | \
+		sort -n | uniq -c | \
+		awk '($1 != 1)' | \
+		sort -n
+	((nr++))
+done
+
 t_pass