scoutfs-tests: add createmany-rename-large-dir

Add a test that randomly renames entries in a single large directory.
This has caught bugs in the reservation of allocator resources for
client transactions.

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown
2020-11-13 14:13:09 -08:00
committed by Andy Grover
parent 84bb170e3a
commit 7b2310442b
3 changed files with 42 additions and 0 deletions

@@ -0,0 +1,2 @@
== create large directory with 1220608 files
== randomly renaming 5000 files

@@ -13,6 +13,7 @@ lock-pr-cw-conflict.sh
lock-revoke-getcwd.sh
createmany-parallel.sh
createmany-large-names.sh
createmany-rename-large-dir.sh
stage-release-race-alloc.sh
basic-posix-consistency.sh
dirent-consistency.sh

@@ -0,0 +1,39 @@
#
# We've had bugs where client transactions weren't properly being
# committed as their allocators ran out of resources for the amount of
# log btree dirtying they'd need to do for the size of their dirty item
# cache pages.
#
# This stresses those heuristics by trying to maximize the number of
# btree blocks dirtied for each dirty item cache page. We create an
# enormous directory and then randomly rename entries in it.
#
# With the bad client commit heuristics this would reliably fail before
# a few thousand renames.
#

t_require_commands createmany mv mkdir

# 298 dirents per 64k block, 4096 max meta avail -- often fewer
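# 298 * 4096 = 1220608 entries in total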
NR=$((298 * 4096))
RENAMES=5000
DIR="$T_D0/dir/"
echo "== create large directory with $NR files"
t_quiet mkdir -p "$DIR"
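# populate the directory with $NR files named f-0 through f-$((NR - 1))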
./src/createmany -o "$DIR/f-" $NR > "$T_TMP.createmany.stdout"

echo "== randomly renaming $RENAMES files"
for i in $(seq 1 $RENAMES); do
	rnd=$(((1$RANDOM$RANDOM) % NR))
	orig="$DIR/f-$rnd"
	tmp="$DIR/f-$rnd.$i"
	mv "$orig" "$tmp"
	mv "$tmp" "$orig"
done

t_quiet rm -rf "$DIR"

t_pass