Compare commits

..

1 Commits

Author SHA1 Message Date
Auke Kok
cf635cd0da Ramping up data preallocation
Ramps up data preallocation based on the number of online
blocks. This results in a simple 2<<n block allocation pattern
until n=11 (2048) - the default value of data_prealloc_blocks.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-04-21 18:15:42 -04:00
7 changed files with 34 additions and 112 deletions

View File

@@ -1,23 +1,6 @@
Versity ScoutFS Release Notes
=============================
---
v1.30
*Apr 21, 2026*
Fix a problem reading the accumulated totals of contributing .totl.
xattrs when log merging is in progress. The problem caused readers of
the totals to calculate the sums incorrectly.
Fix a problem updating quota rules. There was a race where updates
could be corrupted if they happened while a transaction was being
written.
Fix a problem deleting files with .indx. xattrs. The internal indexing
metadata wasn't properly deleted, so the index would still report the
files as present and visible even though they no longer existed.
---
v1.29

View File

@@ -384,6 +384,12 @@ static inline u64 ext_last(struct scoutfs_extent *ext)
* This can waste a lot of space for small or sparse files but is
* reasonable when a file population is known to be large and dense but
* known to be written with non-streaming write patterns.
*
* In either strategy, preallocation ramps up proportionally with the
* file's online block count rather than jumping to the full prealloc
* size. This avoids overallocation for small files in mixed-size
* workloads while still allowing large files to benefit from full
* preallocation.
*/
static int alloc_block(struct super_block *sb, struct inode *inode,
struct scoutfs_extent *ext, u64 iblock,
@@ -400,6 +406,7 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
struct scoutfs_extent found;
struct scoutfs_extent pre = {0,};
bool undo_pre = false;
bool have_onoff = false;
u64 blkno = 0;
u64 online;
u64 offline;
@@ -445,6 +452,7 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
* blocks.
*/
scoutfs_inode_get_onoff(inode, &online, &offline);
have_onoff = true;
if (iblock > 1 && iblock == online) {
ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
iblock, 1, &found);
@@ -491,6 +499,16 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
/* overall prealloc limit */
count = min_t(u64, count, opts.data_prealloc_blocks);
/*
* Ramp preallocation up proportionally with the file's online
* block count rather than jumping to the full prealloc size.
*/
if (!ext->len) {
if (!have_onoff)
scoutfs_inode_get_onoff(inode, &online, &offline);
count = max_t(u64, 1, min(count, online));
}
ret = scoutfs_alloc_data(sb, datinf->alloc, datinf->wri,
&datinf->dalloc, count, &blkno, &count);
if (ret < 0)

View File

@@ -730,14 +730,7 @@ out:
}
spin_lock(&qtinf->lock);
/* drop our result if a racing invalidation cleared EBUSY */
if (rcu_dereference_protected(qtinf->ruleset,
lockdep_is_held(&qtinf->lock)) != ERR_PTR(-EBUSY)) {
if (!IS_ERR(rs))
free_ruleset(rs);
} else {
rcu_assign_pointer(qtinf->ruleset, rs);
}
rcu_assign_pointer(qtinf->ruleset, rs);
spin_unlock(&qtinf->lock);
wake_up(&qtinf->waitq);
}
@@ -1149,10 +1142,12 @@ void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key
}
/*
* Mark the cached ruleset invalid and free the previous one once readers
* drain. Called from cluster lock invalidation and from quota rule
* modification; a concurrent local reader may have set EBUSY, in which
* case it will detect the EINVAL on completion and discard its result.
* This is called during cluster lock invalidation to indicate that the
* ruleset is no longer protected by cluster locking and might have been
* modified. We mark the ruleset invalid and free it once all readers
* drain. The next check will acquire the cluster lock and read the
* rules. Because this is called during invalidation this is serialized
* with write holders of cluster locks so we can never see -EBUSY here.
*/
void scoutfs_quota_invalidate(struct super_block *sb)
{
@@ -1170,6 +1165,9 @@ void scoutfs_quota_invalidate(struct super_block *sb)
rcu_assign_pointer(qtinf->ruleset, ERR_PTR(-EINVAL));
spin_unlock(&qtinf->lock);
/* cluster locking should have prevented this */
BUG_ON(rs == ERR_PTR(-EBUSY));
if (!IS_ERR(rs))
call_rcu(&rs->rcu, free_ruleset_rcu);

View File

@@ -8,10 +8,10 @@
/mnt/test/test/data-prealloc/file-1: extents: 32
/mnt/test/test/data-prealloc/file-2: extents: 32
== any writes to region prealloc get full extents
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
/mnt/test/test/data-prealloc/file-1: extents: 8
/mnt/test/test/data-prealloc/file-2: extents: 8
/mnt/test/test/data-prealloc/file-1: extents: 8
/mnt/test/test/data-prealloc/file-2: extents: 8
== streaming offline writes get full extents either way
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
@@ -20,8 +20,8 @@
== goofy preallocation amounts work
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 10
/mnt/test/test/data-prealloc/file-2: extents: 10
/mnt/test/test/data-prealloc/file-1: extents: 3
/mnt/test/test/data-prealloc/file-2: extents: 3
== block writes into region allocs hole

View File

@@ -1,6 +0,0 @@
== setup
== concurrent quota mod and check across mounts
== verify quota rules are consistent after race
== verify file creation still works under quota
file visible on mount 1
== cleanup

View File

@@ -29,7 +29,6 @@ totl-xattr-tag.sh
basic-xattr-indx.sh
quota.sh
totl-merge-read.sh
quota-invalidate-race.sh
lock-refleak.sh
lock-shrink-consistency.sh
lock-shrink-read-race.sh

View File

@@ -1,70 +0,0 @@
#
# Regression for the BUG_ON in scoutfs_quota_invalidate when a concurrent
# ruleset read on one mount races with a quota rule modification.
#
# Two mounts so cross-mount file access drives cluster lock invalidation
# traffic into the quota code while rules are being modified locally.
t_require_mounts 2
# Unprivileged uid so file creation goes through the quota check path
# (root would bypass quota enforcement).
TEST_UID=22222
SET_UID="--ruid=$TEST_UID --euid=$TEST_UID"
echo "== setup"
mkdir -p "$T_D0/dir"
chown --quiet $TEST_UID "$T_D0/dir"
# totl xattr gives quota checks something to consult
setfattr -n scoutfs.totl.test.1.1.1 -v 1 "$T_D0/dir"
echo "== concurrent quota mod and check across mounts"
# Background loop 1: repeatedly add and delete a quota rule so the cached
# ruleset is invalidated over and over while the readers below run.
(
for i in $(seq 1 20); do
scoutfs quota-add -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
scoutfs quota-del -p "$T_M0" \
-r "1 1,L,- 1,L,- $i,L,- I 999999 -" 2>/dev/null
done
) &
MOD_PID=$!
# same mount as the mod: races local read against invalidate
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D0/dir/race0_$i" 2>/dev/null
rm -f "$T_D0/dir/race0_$i"
done
) &
CHECK0_PID=$!
# other mount: drives cross-node lock traffic
(
for i in $(seq 1 50); do
setpriv $SET_UID touch "$T_D1/dir/race1_$i" 2>/dev/null
rm -f "$T_D1/dir/race1_$i"
done
) &
CHECK1_PID=$!
# Reap all three racing loops; a triggered BUG_ON in the kernel would
# surface as a hang or failure here rather than a clean wait.
t_quiet wait $MOD_PID
t_quiet wait $CHECK0_PID
t_quiet wait $CHECK1_PID
echo "== verify quota rules are consistent after race"
# Wipe then list: listing should succeed and show no leftover rules if
# the ruleset survived the race intact.
scoutfs quota-wipe -p "$T_M0"
scoutfs quota-list -p "$T_M0"
echo "== verify file creation still works under quota"
scoutfs quota-add -p "$T_M0" -r "1 1,L,- 1,L,- 1,L,- I 999999 -"
sync
# Drop cached state so the create below re-reads quota rules from disk —
# presumably that is what these debugfs knobs do; verify against the
# module's debugfs implementation.
echo 1 > $(t_debugfs_path)/drop_weak_item_cache
echo 1 > $(t_debugfs_path)/drop_quota_check_cache
setpriv $SET_UID touch "$T_D0/dir/verify_file"
# Cross-mount visibility check confirms cluster locking still coheres.
test -f "$T_D1/dir/verify_file" && echo "file visible on mount 1"
rm -f "$T_D0/dir/verify_file"
scoutfs quota-wipe -p "$T_M0"
echo "== cleanup"
setfattr -x scoutfs.totl.test.1.1.1 "$T_D0/dir"
rm -rf "$T_D0/dir"
t_pass