Directly queue cluster lock work

We had a small helper that queued the work after testing whether the
work list was non-empty, which required holding the work list spinlock.
That was too crude: it forced scoutfs_unlock() to acquire the invalidate
work list spinlock even though it already held the cluster lock spinlock
and could see that invalidate requests were pending and the invalidation
work should be queued.  Have callers queue the work directly instead.

Signed-off-by: Zach Brown <zab@versity.com>
Author: Zach Brown <zab@versity.com>
Date: 2022-06-07 11:02:40 -07:00
Committed by: Chris Kirby
Parent: 09fe4fddd4
Commit: 6b67aee2e3
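
For context, a rough before/after sketch of the queueing pattern this commit
changes.  It is simplified from the diff below; the identifiers come from the
patch, but the surrounding structures and locking context are assumptions,
not the full driver code.

/*
 * Before: the helper re-tested the list under the work list spinlock,
 * so every caller had to take wlist->lock just to queue the work.
 */
static void queue_nonempty_work_list(struct lock_info *linfo,
                                     struct work_list *wlist)
{
        assert_spin_locked(&wlist->lock);
        if (!list_empty(&wlist->list))
                queue_work(linfo->workq, &wlist->work);
}

/* unlock path, before: take inv_wlist.lock just to test and queue */
spin_lock(&linfo->inv_wlist.lock);
queue_nonempty_work_list(linfo, &linfo->inv_wlist);
spin_unlock(&linfo->inv_wlist.lock);

/*
 * After: scoutfs_unlock() already holds the cluster lock's spinlock and
 * can see the lock's pending invalidate requests, so it queues the work
 * directly without touching inv_wlist.lock.
 */
if (!list_empty(&lock->inv_req_list))
        queue_work(linfo->workq, &linfo->inv_wlist.work);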


@@ -126,14 +126,6 @@ static void init_work_list(struct work_list *wlist, work_func_t func)
 	INIT_LIST_HEAD(&wlist->list);
 }
-static void queue_nonempty_work_list(struct lock_info *linfo, struct work_list *wlist)
-{
-	assert_spin_locked(&wlist->lock);
-	if (!list_empty(&wlist->list))
-		queue_work(linfo->workq, &wlist->work);
-}
 /*
  * Returns true if a lock with the granted mode can satisfy a requested
  * mode. This is directional. A read lock is satisfied by a write lock
@@ -921,7 +913,7 @@ retry:
 		/* another request arrived, back on the list and requeue */
 		spin_lock(&linfo->inv_wlist.lock);
 		list_move_tail(&lock->inv_head, &linfo->inv_wlist.list);
-		queue_nonempty_work_list(linfo, &linfo->inv_wlist);
+		queue_work(linfo->workq, &linfo->inv_wlist.work);
 		spin_unlock(&linfo->inv_wlist.lock);
 	}
@@ -978,7 +970,7 @@ int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
 		spin_lock(&linfo->inv_wlist.lock);
 		list_add_tail(&lock->inv_head, &linfo->inv_wlist.list);
 		lock->invalidate_pending = 1;
-		queue_nonempty_work_list(linfo, &linfo->inv_wlist);
+		queue_work(linfo->workq, &linfo->inv_wlist.work);
 		spin_unlock(&linfo->inv_wlist.lock);
 	}
 	list_add_tail(&ireq->head, &lock->inv_req_list);
@@ -1488,9 +1480,8 @@ void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock, enum scou
 	trace_scoutfs_lock_unlock(sb, lock);
-	spin_lock(&linfo->inv_wlist.lock);
-	queue_nonempty_work_list(linfo, &linfo->inv_wlist);
-	spin_unlock(&linfo->inv_wlist.lock);
+	if (!list_empty(&lock->inv_req_list))
+		queue_work(linfo->workq, &linfo->inv_wlist.work);
 	spin_unlock(&lock->lock);
 	wake_up(&lock->waitq);
@@ -1717,7 +1708,8 @@ static unsigned long lock_scan_objects(struct shrinker *shrink,
 	spin_unlock(&linfo->lock);
 	spin_lock(&linfo->shrink_wlist.lock);
-	queue_nonempty_work_list(linfo, &linfo->shrink_wlist);
+	if (!list_empty(&linfo->shrink_wlist.list))
+		queue_work(linfo->workq, &linfo->shrink_wlist.work);
 	spin_unlock(&linfo->shrink_wlist.lock);
 out: