Compare commits

..

3 Commits

Author SHA1 Message Date
Auke Kok
52e1c24d0c Add quota invalidate race regression test
Run concurrent quota add/del on one mount against rapid file
creation and deletion on both mounts to exercise the race fixed
in the previous commit.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-04-30 17:41:34 -07:00
Auke Kok
66a90d1b9f Fix quota invalidate race with concurrent ruleset read
A quota check holding the quota cluster lock for READ sets the
cached ruleset to EBUSY while loading rules from the btree.  A
quota mod holding the same lock for WRITE then calls
scoutfs_quota_invalidate() and trips
BUG_ON(rs == ERR_PTR(-EBUSY)).  READ and WRITE cluster locks are
compatible on the same mount, so the BUG_ON's premise was wrong.

Drop the BUG_ON: the existing EINVAL swap already handles EBUSY
correctly.  Have the reader check that the ruleset is still EBUSY before
installing its result, so that a racing invalidation wins and the stale
result is discarded.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-04-30 17:41:34 -07:00
Zach Brown
af31b9f1e8 Merge pull request #306 from versity/zab/v1.30
v1.30 Release
2026-04-22 10:43:17 -07:00
4 changed files with 89 additions and 10 deletions

View File

@@ -730,7 +730,14 @@ out:
}
spin_lock(&qtinf->lock);
rcu_assign_pointer(qtinf->ruleset, rs);
/* drop our result if a racing invalidation cleared EBUSY */
if (rcu_dereference_protected(qtinf->ruleset,
lockdep_is_held(&qtinf->lock)) != ERR_PTR(-EBUSY)) {
if (!IS_ERR(rs))
free_ruleset(rs);
} else {
rcu_assign_pointer(qtinf->ruleset, rs);
}
spin_unlock(&qtinf->lock);
wake_up(&qtinf->waitq);
}
@@ -1142,12 +1149,10 @@ void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key
}
/*
* This is called during cluster lock invalidation to indicate that the
* ruleset is no longer protected by cluster locking and might have been
* modified. We mark the ruleset invalid and free it once all readers
* drain. The next check will acquire the cluster lock and read the
* rules. Because this is called during invalidation this is serialized
* with write holders of cluster locks so we can never see -EBUSY here.
* Mark the cached ruleset invalid and free the previous one once readers
* drain. Called from cluster lock invalidation and from quota rule
* modification; a concurrent local reader may have set EBUSY, in which
* case it will detect the EINVAL on completion and discard its result.
*/
void scoutfs_quota_invalidate(struct super_block *sb)
{
@@ -1165,9 +1170,6 @@ void scoutfs_quota_invalidate(struct super_block *sb)
rcu_assign_pointer(qtinf->ruleset, ERR_PTR(-EINVAL));
spin_unlock(&qtinf->lock);
/* cluster locking should have prevented this */
BUG_ON(rs == ERR_PTR(-EBUSY));
if (!IS_ERR(rs))
call_rcu(&rs->rcu, free_ruleset_rcu);

View File

@@ -0,0 +1,6 @@
== setup
== concurrent quota mod and check across mounts
== verify quota rules are consistent after race
== verify file creation still works under quota
file visible on mount 1
== cleanup

View File

@@ -29,6 +29,7 @@ totl-xattr-tag.sh
basic-xattr-indx.sh
quota.sh
totl-merge-read.sh
quota-invalidate-race.sh
lock-refleak.sh
lock-shrink-consistency.sh
lock-shrink-read-race.sh

View File

@@ -0,0 +1,70 @@
#
# Regression for the BUG_ON in scoutfs_quota_invalidate when a concurrent
# ruleset read on one mount races with a quota rule modification.
#
# NOTE(review): the t_* helpers and $T_D0/$T_D1/$T_M0 variables come from
# the scoutfs test harness, which is not visible in this file — confirm
# their semantics against the harness before changing this script.
t_require_mounts 2
# Unprivileged uid for the quota-checked file operations below.
TEST_UID=22222
SET_UID="--ruid=$TEST_UID --euid=$TEST_UID"
echo "== setup"
mkdir -p "$T_D0/dir"
chown --quiet $TEST_UID "$T_D0/dir"
# totl xattr gives quota checks something to consult
setfattr -n scoutfs.totl.test.1.1.1 -v 1 "$T_D0/dir"
echo "== concurrent quota mod and check across mounts"
# Worker 1: repeatedly add/delete a quota rule on mount 0 so that rule
# modification keeps invalidating the cached ruleset (the invalidate
# side of the race described in the fix commit).
(
for i in $(seq 1 20); do
scoutfs quota-add -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
scoutfs quota-del -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
done
) &
MOD_PID=$!
# same mount as the mod: races local read against invalidate
# Worker 2: errors are deliberately discarded — only triggering quota
# checks matters, not whether each create/remove succeeds.
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D0/dir/race0_$i" 2>/dev/null
rm -f "$T_D0/dir/race0_$i"
done
) &
CHECK0_PID=$!
# other mount: drives cross-node lock traffic
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D1/dir/race1_$i" 2>/dev/null
rm -f "$T_D1/dir/race1_$i"
done
) &
CHECK1_PID=$!
# Reap all three background workers before inspecting state.
t_quiet wait $MOD_PID
t_quiet wait $CHECK0_PID
t_quiet wait $CHECK1_PID
echo "== verify quota rules are consistent after race"
# Wipe then list: the (empty) listing in the golden output shows the
# rule store is still readable after the race.
scoutfs quota-wipe -p "$T_M0"
scoutfs quota-list -p "$T_M0"
echo "== verify file creation still works under quota"
scoutfs quota-add -p "$T_M0" -r "1 1,L,- 1,L,- 1,L,- I 999999 -"
sync
# Drop caches via debugfs — presumably forces the next quota check to
# re-read rather than hit cached state; confirm against the kernel side.
echo 1 > $(t_debugfs_path)/drop_weak_item_cache
echo 1 > $(t_debugfs_path)/drop_quota_check_cache
setpriv $SET_UID touch "$T_D0/dir/verify_file"
# Cross-mount visibility confirms cluster-locked operation post-race.
test -f "$T_D1/dir/verify_file" && echo "file visible on mount 1"
rm -f "$T_D0/dir/verify_file"
scoutfs quota-wipe -p "$T_M0"
echo "== cleanup"
setfattr -x scoutfs.totl.test.1.1.1 "$T_D0/dir"
rm -rf "$T_D0/dir"
t_pass