scoutfs: add lock coverage testing to item_lookup*

Let's give the item functions the full lock so that they can make sure
that the lock has coverage for the keys involved in the operation.

This _lookup*() conversion comes first, so it also adds the
lock_coverage() helper.

Signed-off-by: Zach Brown <zab@versity.com>
Author:       Zach Brown
Date:         2017-09-28 15:45:45 -07:00
Committed by: Mark Fasheh
Parent:       1193fbc9c5
Commit:       55709c4345

5 changed files with 43 additions and 12 deletions
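
In short, the calling convention changes from passing the lock's end key to
passing the lock itself. A distilled before/after of the lookup call sites
touched below (a sketch only, using the names from the diff):

	/* before: the item code saw only the lock's end key */
	ret = scoutfs_item_lookup(sb, key, val, lock->end);

	/* after: the full lock is passed so the item code can verify
	 * that its key range and DLM level cover the operation */
	ret = scoutfs_item_lookup(sb, key, val, lock);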


@@ -277,8 +277,7 @@ static struct dentry *scoutfs_lookup(struct inode *dir, struct dentry *dentry,
 	scoutfs_kvec_init(val, &dent, sizeof(dent));
-	ret = scoutfs_item_lookup_exact(sb, key, val, sizeof(dent),
-					dir_lock->end);
+	ret = scoutfs_item_lookup_exact(sb, key, val, sizeof(dent), dir_lock);
 	scoutfs_unlock(sb, dir_lock, DLM_LOCK_PR);
 	if (ret == -ENOENT) {
 		ino = 0;
@@ -860,7 +859,7 @@ static int symlink_item_ops(struct super_block *sb, int op, u64 ino,
 		ret = scoutfs_item_create(sb, &key, val);
 	else if (op == SYM_LOOKUP)
 		ret = scoutfs_item_lookup_exact(sb, &key, val, bytes,
-						lock->end);
+						lock);
 	else if (op == SYM_DELETE)
 		ret = scoutfs_item_delete(sb, &key, lock->end);
 	if (ret)
@@ -1273,7 +1272,7 @@ static int verify_entry(struct super_block *sb, u64 dir_ino, const char *name,
 	scoutfs_kvec_init(val, &dent, sizeof(dent));
-	ret = scoutfs_item_lookup_exact(sb, key, val, sizeof(dent), lock->end);
+	ret = scoutfs_item_lookup_exact(sb, key, val, sizeof(dent), lock);
 	if (ret == 0 && le64_to_cpu(dent.ino) != ino)
 		ret = -ENOENT;
 	else if (ret == -ENOENT && ino == 0)


@@ -275,7 +275,7 @@ int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
 	mutex_lock(&si->item_mutex);
 	if (atomic64_read(&si->last_refreshed) < refresh_gen) {
 		ret = scoutfs_item_lookup_exact(sb, &key, val, sizeof(sinode),
-						lock->end);
+						lock);
 		if (ret == 0) {
 			load_inode(inode, &sinode);
 			atomic64_set(&si->last_refreshed, refresh_gen);


@@ -730,6 +730,35 @@ restart:
 	}
 }
 
+/*
+ * Return true if the lock protects the use of the key.  Some locks not
+ * intended for item use don't have a key range and we want to safely
+ * detect that.  We use the block 'rw' constants just because they're
+ * convenient.  The level test is racy but it's a char.. how racy can
+ * it be? :)
+ */
+static bool lock_coverage(struct scoutfs_lock *lock,
+			  struct scoutfs_key_buf *key, int rw)
+{
+	bool writing = rw & WRITE;
+	signed char level;
+
+	if (rw & ~WRITE)
+		return false;
+
+	if (!lock || !lock->start || !lock->end)
+		return false;
+
+	level = ACCESS_ONCE(lock->lockres.l_level);
+	if ((writing && level != DLM_LOCK_EX) ||
+	    (!writing && level != DLM_LOCK_EX && level != DLM_LOCK_PR))
+		return false;
+
+	return scoutfs_key_compare_ranges(key, key,
+					  lock->start, lock->end) == 0;
+}
+
 /*
  * Find an item with the given key and copy its value into the caller's
  * value vector.  The amount of bytes copied is returned which can be 0
@@ -739,7 +768,7 @@ restart:
  * and inserted into the cache.
  */
 int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key_buf *key,
-			struct kvec *val, struct scoutfs_key_buf *end)
+			struct kvec *val, struct scoutfs_lock *lock)
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 	struct item_cache *cac = sbi->item_cache;
@@ -747,6 +776,9 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key_buf *key,
 	unsigned long flags;
 	int ret;
 
+	if (WARN_ON_ONCE(!lock_coverage(lock, key, READ)))
+		return -EINVAL;
+
 	trace_scoutfs_item_lookup(sb, key);
 
 	do {
@@ -765,7 +797,7 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key_buf *key,
 		spin_unlock_irqrestore(&cac->lock, flags);
 	} while (ret == -ENODATA &&
-		 (ret = scoutfs_manifest_read_items(sb, key, end)) == 0);
+		 (ret = scoutfs_manifest_read_items(sb, key, lock->end)) == 0);
 
 	trace_scoutfs_item_lookup_ret(sb, ret);
 	return ret;
@@ -786,11 +818,11 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key_buf *key,
  */
 int scoutfs_item_lookup_exact(struct super_block *sb,
 			      struct scoutfs_key_buf *key, struct kvec *val,
-			      int size, struct scoutfs_key_buf *end)
+			      int size, struct scoutfs_lock *lock)
 {
 	int ret;
 
-	ret = scoutfs_item_lookup(sb, key, val, end);
+	ret = scoutfs_item_lookup(sb, key, val, lock);
 	if (ret == size)
 		ret = 0;
 	else if (ret >= 0)
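
The rw argument of lock_coverage() reuses the kernel's block-layer READ/WRITE
constants: a read is covered by a PR or EX lock level, a write only by EX.
This patch only wires up the read side in scoutfs_item_lookup(); a write-path
conversion would presumably mirror that check along these lines (hypothetical
sketch, not part of this commit):

	if (WARN_ON_ONCE(!lock_coverage(lock, key, WRITE)))
		return -EINVAL;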


@@ -13,10 +13,10 @@ struct scoutfs_segment;
 struct scoutfs_key_buf;
 
 int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key_buf *key,
-			struct kvec *val, struct scoutfs_key_buf *end);
+			struct kvec *val, struct scoutfs_lock *lock);
 int scoutfs_item_lookup_exact(struct super_block *sb,
 			      struct scoutfs_key_buf *key, struct kvec *val,
-			      int size, struct scoutfs_key_buf *end);
+			      int size, struct scoutfs_lock *lock);
 int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 		      struct scoutfs_key_buf *last, struct kvec *val,
 		      struct scoutfs_key_buf *end);
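
Note that scoutfs_item_next() above still takes the bare end key; per the
commit message the _lookup*() conversion comes first, and the remaining item
entry points are presumably converted in follow-up patches.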


@@ -190,7 +190,7 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
 	for_each_xattr_item(key, val, &vh, buffer, size, part, off, bytes) {
-		ret = scoutfs_item_lookup(sb, key, val, lck->end);
+		ret = scoutfs_item_lookup(sb, key, val, lck);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = -ENODATA;