Compare commits

..

3 Commits

Author SHA1 Message Date
Auke Kok
ac1ab8e87f Add cond_resched in block_free_work
I'm seeing consistent CPU soft lockups in block_free_work on
my bare metal system that don't occur on VM instances. The
reason is that the bare metal machine has a ton more memory
available, causing the block free work queue to grow much
larger in size; it then has so much work that it can take 30+
seconds to go through it all.

This is all with a debug kernel. A non-debug kernel will likely
zoom through the outstanding work here at a much faster rate.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-04-22 13:39:32 -07:00
Zach Brown
af31b9f1e8 Merge pull request #306 from versity/zab/v1.30
v1.30 Release
2026-04-22 10:43:17 -07:00
Zach Brown
ad65116d8f v1.30 Release
Finish the release notes for the 1.30 release.

Signed-off-by: Zach Brown <zab@versity.com>
2026-04-21 16:43:12 -07:00
4 changed files with 24 additions and 24 deletions

View File

@@ -1,6 +1,23 @@
Versity ScoutFS Release Notes
=============================
---
v1.30
\
*Apr 21, 2026*
Fix a problem reading the accumulated totals of contributing .totl.
xattrs when log merging is in progress. The problem caused readers
of the totals to calculate the sums incorrectly.
Fix a problem updating quota rules. There was a race where updates
could be corrupted if they happened while a transaction was being
written.
Fix a problem deleting files with .indx. xattrs. The internal indexing
metadata wouldn't be properly deleted, so the files would still claim to
be present and visible in the index even though they no longer existed.
---
v1.29
\

View File

@@ -218,6 +218,7 @@ static void block_free_work(struct work_struct *work)
llist_for_each_entry_safe(bp, tmp, deleted, free_node) {
block_free(sb, bp);
cond_resched();
}
}

View File

@@ -384,12 +384,6 @@ static inline u64 ext_last(struct scoutfs_extent *ext)
* This can waste a lot of space for small or sparse files but is
* reasonable when a file population is known to be large and dense but
* known to be written with non-streaming write patterns.
*
* In either strategy, preallocation ramps up proportionally with the
* file's online block count rather than jumping to the full prealloc
* size. This avoids overallocation for small files in mixed-size
* workloads while still allowing large files to benefit from full
* preallocation.
*/
static int alloc_block(struct super_block *sb, struct inode *inode,
struct scoutfs_extent *ext, u64 iblock,
@@ -406,7 +400,6 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
struct scoutfs_extent found;
struct scoutfs_extent pre = {0,};
bool undo_pre = false;
bool have_onoff = false;
u64 blkno = 0;
u64 online;
u64 offline;
@@ -452,7 +445,6 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
* blocks.
*/
scoutfs_inode_get_onoff(inode, &online, &offline);
have_onoff = true;
if (iblock > 1 && iblock == online) {
ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
iblock, 1, &found);
@@ -499,16 +491,6 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
/* overall prealloc limit */
count = min_t(u64, count, opts.data_prealloc_blocks);
/*
* Ramp preallocation up proportionally with the file's online
* block count rather than jumping to the full prealloc size.
*/
if (!ext->len) {
if (!have_onoff)
scoutfs_inode_get_onoff(inode, &online, &offline);
count = max_t(u64, 1, min(count, online));
}
ret = scoutfs_alloc_data(sb, datinf->alloc, datinf->wri,
&datinf->dalloc, count, &blkno, &count);
if (ret < 0)

View File

@@ -8,10 +8,10 @@
/mnt/test/test/data-prealloc/file-1: extents: 32
/mnt/test/test/data-prealloc/file-2: extents: 32
== any writes to region prealloc get full extents
/mnt/test/test/data-prealloc/file-1: extents: 8
/mnt/test/test/data-prealloc/file-2: extents: 8
/mnt/test/test/data-prealloc/file-1: extents: 8
/mnt/test/test/data-prealloc/file-2: extents: 8
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
== streaming offline writes get full extents either way
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
@@ -20,8 +20,8 @@
== goofy preallocation amounts work
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 10
/mnt/test/test/data-prealloc/file-2: extents: 10
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 3
/mnt/test/test/data-prealloc/file-2: extents: 3
== block writes into region allocs hole