Work around sparse warning in _item_write_done

scoutfs_item_write_done() acquires the cinf dirty_lock and pg rwlock out
of order.  It uses a trylock to detect failure and back off of both
before retrying.
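
The ordering problem and the backoff are the standard trylock dance; as a
sketch (outer_lock and inner_lock are hypothetical stand-ins for
cinf->dirty_lock and pg->rwlock, declared elsewhere as spinlock_t and
rwlock_t):

    static void take_both_out_of_order(void)
    {
    retry:
        spin_lock(&outer_lock);
        /* inner_lock is ordered before outer_lock, so only try it */
        if (!write_trylock(&inner_lock)) {
            /* back off of both so the rwlock holder can make progress */
            spin_unlock(&outer_lock);
            cpu_relax();
            goto retry;
        }
        /* ... work with both locks held ... */
        write_unlock(&inner_lock);
        spin_unlock(&outer_lock);
    }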

sparse seems to have some peculiar sensitivity to following the else
branch from a failed trylock while already in a context.  Doing that
consistently triggered the spurious mismatched context warning.
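
For reference, the kernel annotates trylocks for sparse with __cond_lock()
so the checker considers the lock held only when the call returns nonzero;
simplified from include/linux/compiler_types.h and include/linux/rwlock.h
(details vary by kernel version):

    #ifdef __CHECKER__
    #define __cond_lock(x, c)   ((c) ? ({ __acquire(x); 1; }) : 0)
    #else
    #define __cond_lock(x, c)   (c)
    #endif

    #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))

When the branch taken on trylock failure also unlocks another, already-held
lock, sparse's per-branch context counting can come out unbalanced and it
reports a "context imbalance" warning even though the code is correct.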

This refactors the loop to always drop and reacquire the dirty_lock
after attempting the trylock.  It's not great, but the lock shouldn't be
very contended because the transaction write has serialized the write
lock holders that would be trying to dirty items.  The silly lock noise
will be mostly cached.

Signed-off-by: Zach Brown <zab@versity.com>
Author: Zach Brown
Date: 2025-09-30 12:17:49 -07:00
parent 9741d40e10
commit 3d9f10de93


@@ -2401,6 +2401,12 @@ out:
  * The caller has successfully committed all the dirty btree blocks that
  * contained the currently dirty items.  Clear all the dirty items and
  * pages.
+ *
+ * This strange lock/trylock loop comes from sparse issuing spurious
+ * mismatched context warnings if we do anything (like unlock and relax)
+ * in the else branch of the failed trylock.  We're jumping through
+ * hoops to not use the else but still drop and reacquire the dirty_lock
+ * if the trylock fails.
  */
 int scoutfs_item_write_done(struct super_block *sb)
 {
@@ -2409,40 +2415,30 @@ int scoutfs_item_write_done(struct super_block *sb)
 	struct cached_item *tmp;
 	struct cached_page *pg;
 
-retry:
 	spin_lock(&cinf->dirty_lock);
-	while ((pg = list_first_entry_or_null(&cinf->dirty_list,
-					      struct cached_page,
-					      dirty_head))) {
-		if (!write_trylock(&pg->rwlock)) {
-			spin_unlock(&cinf->dirty_lock);
-			cpu_relax();
-			goto retry;
-		}
-		list_for_each_entry_safe(item, tmp, &pg->dirty_list,
-					 dirty_head) {
-			clear_item_dirty(sb, cinf, pg, item);
-			if (item->delta)
-				scoutfs_inc_counter(sb, item_delta_written);
-			/* free deletion items */
-			if (item->deletion || item->delta)
-				erase_item(pg, item);
-			else
-				item->persistent = 1;
-		}
-		write_unlock(&pg->rwlock);
-		spin_lock(&cinf->dirty_lock);
-	}
-	spin_unlock(&cinf->dirty_lock);
+	do {
+		while ((pg = list_first_entry_or_null(&cinf->dirty_list, struct cached_page, dirty_head))) {
+			if (write_trylock(&pg->rwlock)) {
+				spin_unlock(&cinf->dirty_lock);
+				list_for_each_entry_safe(item, tmp, &pg->dirty_list,
+							 dirty_head) {
+					clear_item_dirty(sb, cinf, pg, item);
+					if (item->delta)
+						scoutfs_inc_counter(sb, item_delta_written);
+					/* free deletion items */
+					if (item->deletion || item->delta)
+						erase_item(pg, item);
+					else
+						item->persistent = 1;
+				}
+				write_unlock(&pg->rwlock);
+				spin_lock(&cinf->dirty_lock);
+				continue;
+			}
+			break;
+		}
+		spin_unlock(&cinf->dirty_lock);
+		spin_lock(&cinf->dirty_lock);
+	} while (pg);
+	spin_unlock(&cinf->dirty_lock);
 
 	return 0;
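
Flattened out of diff form, the new loop reads roughly as below; the
continue/break placement and the unconditional unlock/lock pair are
assumptions about how the hunk avoids the else branch (a sketch, not the
verbatim source):

    spin_lock(&cinf->dirty_lock);
    do {
        while ((pg = list_first_entry_or_null(&cinf->dirty_list,
                                              struct cached_page, dirty_head))) {
            if (write_trylock(&pg->rwlock)) {
                spin_unlock(&cinf->dirty_lock);
                /* ... clear dirty items, erase deletions ... */
                write_unlock(&pg->rwlock);
                spin_lock(&cinf->dirty_lock);
                continue;
            }
            /* trylock failed; fall through without an else */
            break;
        }
        /* always drop and reacquire after attempting the trylock */
        spin_unlock(&cinf->dirty_lock);
        spin_lock(&cinf->dirty_lock);
    } while (pg);
    spin_unlock(&cinf->dirty_lock);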