Mirror of https://github.com/versity/scoutfs.git, synced 2026-05-04 03:45:43 +00:00

Compare commits: main...auke/quota (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | fc56a69d8f | |
| | c8bc42ccdb | |
@@ -1114,6 +1114,7 @@ int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
 		goto release;
 	}
 
+	wait_event(qtinf->waitq, !ruleset_is_busy(qtinf));
 	scoutfs_quota_invalidate(sb);
 	ret = 0;
 
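The wait_event() added above pairs with a wake_up() on qtinf->waitq once a reader has finished publishing its quota check result. Below is a minimal sketch of that handshake with invented names (qt_sketch, reader_busy, sketch_*); in the real code the busy state is evidently carried in the ruleset pointer itself as ERR_PTR(-EBUSY) (see the later hunks), not in a separate flag.

```c
#include <linux/spinlock.h>
#include <linux/wait.h>

/* invented types and names; illustrates only the wait/wake ordering */
struct qt_sketch {
	spinlock_t lock;
	wait_queue_head_t waitq;
	bool reader_busy;	/* stand-in for the ERR_PTR(-EBUSY) state */
};

/* reader: mark busy, consult the cached rules, publish, wake waiters */
static void sketch_reader_check(struct qt_sketch *qt)
{
	spin_lock(&qt->lock);
	qt->reader_busy = true;
	spin_unlock(&qt->lock);

	/* ... walk cached rules and record the check result ... */

	spin_lock(&qt->lock);
	qt->reader_busy = false;
	spin_unlock(&qt->lock);
	wake_up(&qt->waitq);
}

/* modifier: wait for in-flight readers to publish before invalidating */
static void sketch_mod_rule(struct qt_sketch *qt)
{
	wait_event(qt->waitq, !READ_ONCE(qt->reader_busy));
	/* only now drop the cached ruleset, as scoutfs_quota_invalidate() does */
}
```

This mirrors the ordering the rewritten comment below describes: the modifier invalidates only after any in-flight reader has published, so the next check rebuilds against the newly written rules.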
@@ -1142,12 +1143,17 @@ void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key
 }
 
 /*
- * This is called during cluster lock invalidation to indicate that the
- * ruleset is no longer protected by cluster locking and might have been
- * modified. We mark the ruleset invalid and free it once all readers
- * drain. The next check will acquire the cluster lock and read the
- * rules. Because this is called during invalidation this is serialized
- * with write holders of cluster locks so we can never see -EBUSY here.
+ * Mark the cached ruleset invalid and free the previous one once readers
+ * drain. Called from cluster lock invalidation and from quota rule
+ * modification.
+ *
+ * Cluster lock invalidation runs only after the lock layer has drained
+ * local READ users. Since EBUSY is set only while a reader holds READ,
+ * the reader has already published by the time we run.
+ *
+ * Quota rule modification waits on the waitq for any in-flight reader
+ * to publish before calling here, so the next check rebuilds against
+ * the newly written rules rather than the reader's stale result.
  */
 void scoutfs_quota_invalidate(struct super_block *sb)
 {
@@ -1161,13 +1167,10 @@ void scoutfs_quota_invalidate(struct super_block *sb)
 
 	spin_lock(&qtinf->lock);
 	rs = rcu_dereference_protected(qtinf->ruleset, lockdep_is_held(&qtinf->lock));
-	if (rs != ERR_PTR(-EINVAL))
+	if (rs == ERR_PTR(-ENOENT) || !IS_ERR(rs))
 		rcu_assign_pointer(qtinf->ruleset, ERR_PTR(-EINVAL));
 	spin_unlock(&qtinf->lock);
 
-	/* cluster locking should have prevented this */
-	BUG_ON(rs == ERR_PTR(-EBUSY));
-
 	if (!IS_ERR(rs))
 		call_rcu(&rs->rcu, free_ruleset_rcu);
 
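A hedged reading of the states the cached qtinf->ruleset pointer can hold, inferred only from the values this hunk tests; the helper below is invented for illustration and is not part of scoutfs.

```c
#include <linux/err.h>

/*
 * Invented helper mirroring the new condition above. The cached
 * pointer appears to take one of four shapes:
 *   - a real ruleset      (valid cache; freed via RCU once invalidated)
 *   - ERR_PTR(-ENOENT)    (cached "no rules" result; also invalidated)
 *   - ERR_PTR(-EINVAL)    (already invalid; nothing to do)
 *   - ERR_PTR(-EBUSY)     (a reader is mid-check and will publish)
 */
static bool sketch_should_mark_invalid(void *rs)
{
	return rs == ERR_PTR(-ENOENT) || !IS_ERR(rs);
}
```

Under this reading an -EBUSY value is no longer overwritten or asserted against; it is left for the reader that owns it, which is presumably why the BUG_ON could be dropped along with the callers now waiting for readers to publish.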
tests/golden/quota-invalidate-race (new file, 6 lines)
@@ -0,0 +1,6 @@
+== setup
+== concurrent quota mod and check across mounts
+== verify quota rules are consistent after race
+== verify file creation still works under quota
+file visible on mount 1
+== cleanup
@@ -29,6 +29,7 @@ totl-xattr-tag.sh
 basic-xattr-indx.sh
 quota.sh
 totl-merge-read.sh
+quota-invalidate-race.sh
 lock-refleak.sh
 lock-shrink-consistency.sh
 lock-shrink-read-race.sh
tests/tests/quota-invalidate-race.sh (new file, 70 lines)
@@ -0,0 +1,70 @@
+#
+# Regression for the BUG_ON in scoutfs_quota_invalidate when a concurrent
+# ruleset read on one mount races with a quota rule modification.
+#
+
+t_require_mounts 2
+
+TEST_UID=22222
+SET_UID="--ruid=$TEST_UID --euid=$TEST_UID"
+
+echo "== setup"
+mkdir -p "$T_D0/dir"
+chown --quiet $TEST_UID "$T_D0/dir"
+
+# totl xattr gives quota checks something to consult
+setfattr -n scoutfs.totl.test.1.1.1 -v 1 "$T_D0/dir"
+
+echo "== concurrent quota mod and check across mounts"
+
+(
+	for i in $(seq 1 20); do
+		scoutfs quota-add -p "$T_M0" \
+			-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
+		scoutfs quota-del -p "$T_M0" \
+			-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
+	done
+) &
+MOD_PID=$!
+
+# same mount as the mod: races local read against invalidate
+(
+	for i in $(seq 1 50); do
+		setpriv $SET_UID touch "$T_D0/dir/race0_$i" 2>/dev/null
+		rm -f "$T_D0/dir/race0_$i"
+	done
+) &
+CHECK0_PID=$!
+
+# other mount: drives cross-node lock traffic
+(
+	for i in $(seq 1 50); do
+		setpriv $SET_UID touch "$T_D1/dir/race1_$i" 2>/dev/null
+		rm -f "$T_D1/dir/race1_$i"
+	done
+) &
+CHECK1_PID=$!
+
+t_quiet wait $MOD_PID
+t_quiet wait $CHECK0_PID
+t_quiet wait $CHECK1_PID
+
+echo "== verify quota rules are consistent after race"
+scoutfs quota-wipe -p "$T_M0"
+scoutfs quota-list -p "$T_M0"
+
+echo "== verify file creation still works under quota"
+scoutfs quota-add -p "$T_M0" -r "1 1,L,- 1,L,- 1,L,- I 999999 -"
+sync
+echo 1 > $(t_debugfs_path)/drop_weak_item_cache
+echo 1 > $(t_debugfs_path)/drop_quota_check_cache
+setpriv $SET_UID touch "$T_D0/dir/verify_file"
+test -f "$T_D1/dir/verify_file" && echo "file visible on mount 1"
+rm -f "$T_D0/dir/verify_file"
+scoutfs quota-wipe -p "$T_M0"
+
+echo "== cleanup"
+setfattr -x scoutfs.totl.test.1.1.1 "$T_D0/dir"
+rm -rf "$T_D0/dir"
+
+t_pass