scoutfs: remove unused lock code
There's a fair amount of lock.c that's dead code now that we're using dlmglue. Some of that dead code is now flagged as unused and throws warnings; this commit silences the warnings by removing the code.

Signed-off-by: Zach Brown <zab@versity.com>
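(For context: the warning being silenced is the kind a C compiler emits for a static function that is defined but no longer called anywhere in its translation unit. A minimal sketch of that situation, using a hypothetical file and function name that are not taken from scoutfs:)

    /* unused.c -- compile with: gcc -Wall -c unused.c */

    /*
     * Static, so visible only in this file; with no callers left,
     * gcc reports something like:
     *   warning: 'stale_helper' defined but not used [-Wunused-function]
     */
    static int stale_helper(int x)
    {
            return x + 1;
    }

    int main(void)
    {
            return 0;
    }

Deleting the dead definitions, as this commit does, makes the warning disappear without needing pragmas or __maybe_unused annotations.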
@@ -350,57 +350,6 @@ static void free_lock_tree(struct super_block *sb)
 	}
 }
 
-static void scoutfs_ast(void *astarg)
-{
-	struct scoutfs_lock *lock = astarg;
-	DECLARE_LOCK_INFO(lock->sb, linfo);
-
-	trace_scoutfs_ast(lock->sb, lock);
-
-	spin_lock(&linfo->lock);
-	lock->mode = lock->rqmode;
-	/* Clear blocking flag when we are granted an unlock request */
-	if (lock->rqmode == DLM_LOCK_IV)
-		lock->flags &= ~SCOUTFS_LOCK_BLOCKING;
-	lock->rqmode = DLM_LOCK_IV;
-	spin_unlock(&linfo->lock);
-
-	wake_up(&linfo->waitq);
-}
-
-static void queue_blocking_work(struct lock_info *linfo,
-				struct scoutfs_lock *lock)
-{
-	assert_spin_locked(&linfo->lock);
-	if (!(lock->flags & SCOUTFS_LOCK_QUEUED)) {
-		/* Take a ref for the workqueue */
-		lock->flags |= SCOUTFS_LOCK_QUEUED;
-		lock->refcnt++;
-		queue_work(linfo->downconvert_wq, &lock->dc_work);
-	}
-}
-
-static void set_lock_blocking(struct lock_info *linfo,
-			      struct scoutfs_lock *lock)
-{
-	assert_spin_locked(&linfo->lock);
-	lock->flags |= SCOUTFS_LOCK_BLOCKING;
-	if (lock->holders == 0)
-		queue_blocking_work(linfo, lock);
-}
-
-static void scoutfs_bast(void *astarg, int mode)
-{
-	struct scoutfs_lock *lock = astarg;
-	struct lock_info *linfo = SCOUTFS_SB(lock->sb)->lock_info;
-
-	trace_scoutfs_bast(lock->sb, lock);
-
-	spin_lock(&linfo->lock);
-	set_lock_blocking(linfo, lock);
-	spin_unlock(&linfo->lock);
-}
-
 static int lock_granted(struct lock_info *linfo, struct scoutfs_lock *lock,
 			int mode)
 {
@@ -413,17 +362,6 @@ static int lock_granted(struct lock_info *linfo, struct scoutfs_lock *lock,
 	return ret;
 }
 
-static int lock_blocking(struct lock_info *linfo, struct scoutfs_lock *lock)
-{
-	int ret;
-
-	spin_lock(&linfo->lock);
-	ret = !!(lock->flags & SCOUTFS_LOCK_BLOCKING);
-	spin_unlock(&linfo->lock);
-
-	return ret;
-}
-
 /*
  * Acquire a coherent lock on the given range of keys. While the lock
  * is held other lockers are serialized. Cache coherency is maintained