From d58c8d5993ea8c0468ac79461af0e68ea5da7e41 Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Fri, 9 Mar 2018 13:27:54 -0800
Subject: [PATCH] scoutfs: move lock work after dependencies

Some of the lock processing path was happening too early.  Both
maintenance of the locks on the LRU and waking waiters depend on whether
there is work pending and on the granted mode.  Those are changed in the
middle by processing so we need to move these two bits of work down so
that they can consume the updated state.

Signed-off-by: Zach Brown
---
 kmod/src/lock.c | 56 ++++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/kmod/src/lock.c b/kmod/src/lock.c
index d5eeced1..72eeb88f 100644
--- a/kmod/src/lock.c
+++ b/kmod/src/lock.c
@@ -343,39 +343,12 @@ static void lock_process(struct lock_info *linfo, struct scoutfs_lock *lock)
 	if (linfo->shutdown)
 		return;
 
-	/* only idle locks are on the lru */
-	idle = lock_idle(lock);
-	if (list_empty(&lock->lru_head) && idle) {
-		list_add_tail(&lock->lru_head, &linfo->lru_list);
-		linfo->lru_nr++;
-
-	} else if (!list_empty(&lock->lru_head) && !idle) {
-		list_del_init(&lock->lru_head);
-		linfo->lru_nr--;
-	}
-
 	/* errored locks are torn down */
 	if (lock->error) {
 		wake_up(&lock->waitq);
 		goto out;
 	}
 
-	/*
-	 * Wake any waiters who might be able to use the lock now.
-	 * Notice that this ignores the presence of basts! This lets us
-	 * recursively acquire locks in one task without having to track
-	 * per-task lock references. It comes at the cost of fairness.
-	 * Spinning overlapping users can delay a bast down conversion
-	 * indefinitely.
-	 */
-	for (mode = 0; mode < SCOUTFS_LOCK_NR_MODES; mode++) {
-		if (lock->waiters[mode] &&
-		    lock_modes_match(lock->granted_mode, mode)) {
-			wake_up(&lock->waitq);
-			break;
-		}
-	}
-
 	/*
 	 * Try to down convert a lock in response to a bast once users
 	 * are done with it. We may have to wait for a grace period
@@ -431,13 +404,40 @@ static void lock_process(struct lock_info *linfo, struct scoutfs_lock *lock)
 		}
 	}
 
+	/*
+	 * Wake any waiters who might be able to use the lock now.
+	 * Notice that this ignores the presence of basts! This lets us
+	 * recursively acquire locks in one task without having to track
+	 * per-task lock references. It comes at the cost of fairness.
+	 * Spinning overlapping users can delay a bast down conversion
+	 * indefinitely.
+	 */
+	for (mode = 0; mode < SCOUTFS_LOCK_NR_MODES; mode++) {
+		if (lock->waiters[mode] &&
+		    lock_modes_match(lock->granted_mode, mode)) {
+			wake_up(&lock->waitq);
+			break;
+		}
+	}
+
 out:
+	/* only idle locks are on the lru */
+	idle = lock_idle(lock);
+	if (list_empty(&lock->lru_head) && idle) {
+		list_add_tail(&lock->lru_head, &linfo->lru_list);
+		linfo->lru_nr++;
+
+	} else if (!list_empty(&lock->lru_head) && !idle) {
+		list_del_init(&lock->lru_head);
+		linfo->lru_nr--;
+	}
+
 	/*
 	 * We can free the lock once it's idle and it's either never
 	 * been initially locked or has been unlocked, both of which we
 	 * indicate with IV.
 	 */
-	if (lock_idle(lock) && lock->granted_mode == DLM_LOCK_IV)
+	if (idle && lock->granted_mode == DLM_LOCK_IV)
 		lock_free(linfo, lock);
 }
 