From 1c6e3e39bf5f02773d3f2f3ad78155ca81bfbfff Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Thu, 28 Sep 2017 16:50:19 -0700
Subject: [PATCH] scoutfs: add full lock coverage to _item_next*()

Add the full lock argument to _item_next*() so that it can verify lock
coverage in addition to limiting item cache population to the range
covered by the lock.

Signed-off-by: Zach Brown
---
 kmod/src/data.c  |  7 +++----
 kmod/src/dir.c   |  4 ++--
 kmod/src/inode.c |  2 +-
 kmod/src/ioctl.c |  2 +-
 kmod/src/item.c  | 24 +++++++++++++++---------
 kmod/src/item.h  |  6 +++---
 kmod/src/xattr.c |  4 ++--
 7 files changed, 27 insertions(+), 22 deletions(-)

diff --git a/kmod/src/data.c b/kmod/src/data.c
index 3cc5b04e..73289828 100644
--- a/kmod/src/data.c
+++ b/kmod/src/data.c
@@ -638,7 +638,7 @@ int scoutfs_data_truncate_items(struct super_block *sb, u64 ino, u64 iblock,
 		init_mapping_key(&key, &bmk, ino, iblock);
 		scoutfs_kvec_init(val, map->encoded, sizeof(map->encoded));
 
-		ret = scoutfs_item_next(sb, &key, &last_key, val, lock->end);
+		ret = scoutfs_item_next(sb, &key, &last_key, val, lock);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = 0;
@@ -881,7 +881,7 @@ static int find_free_segno(struct super_block *sb, u64 *segno)
 			    SCOUTFS_FREE_BITS_SEGNO_TYPE);
 	scoutfs_kvec_init(val, &frb, sizeof(struct scoutfs_free_bits));
 
-	ret = scoutfs_item_next(sb, &key, &last_key, val, lock->end);
+	ret = scoutfs_item_next(sb, &key, &last_key, val, lock);
 	if (ret < 0)
 		goto out;
 
@@ -1296,8 +1296,7 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		init_mapping_key(&key, &bmk, ino, blk_off);
 		scoutfs_kvec_init(val, &map->encoded, sizeof(map->encoded));
 
-		ret = scoutfs_item_next(sb, &key, &last_key, val,
-					inode_lock->end);
+		ret = scoutfs_item_next(sb, &key, &last_key, val, inode_lock);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = 0;
diff --git a/kmod/src/dir.c b/kmod/src/dir.c
index 73758a5a..e7c38265 100644
--- a/kmod/src/dir.c
+++ b/kmod/src/dir.c
@@ -379,7 +379,7 @@ static int scoutfs_readdir(struct file *file, void *dirent, filldir_t filldir)
 		scoutfs_kvec_init(val, dent, item_len);
 		ret = scoutfs_item_next_same_min(sb, &key, &last_key, val,
 					offsetof(struct scoutfs_dirent, name[1]),
-					dir_lock->end);
+					dir_lock);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = 0;
@@ -1080,7 +1080,7 @@ static int add_next_linkref(struct super_block *sb, u64 ino,
 	if (ret)
 		goto out;
 
-	ret = scoutfs_item_next(sb, &key, &last, NULL, lock->end);
+	ret = scoutfs_item_next(sb, &key, &last, NULL, lock);
 
 	scoutfs_unlock(sb, lock, DLM_LOCK_PR);
 	lock = NULL;
diff --git a/kmod/src/inode.c b/kmod/src/inode.c
index 91f98bf9..b576333a 100644
--- a/kmod/src/inode.c
+++ b/kmod/src/inode.c
@@ -1009,7 +1009,7 @@ int scoutfs_scan_orphans(struct super_block *sb)
 	init_orphan_key(&last, &last_okey, sbi->node_id, ~0ULL);
 
 	while (1) {
-		ret = scoutfs_item_next_same(sb, &key, &last, NULL, lock->end);
+		ret = scoutfs_item_next_same(sb, &key, &last, NULL, lock);
 		if (ret == -ENOENT) /* No more orphan items */
 			break;
 		if (ret < 0)
diff --git a/kmod/src/ioctl.c b/kmod/src/ioctl.c
index 17747af8..cb4ba017 100644
--- a/kmod/src/ioctl.c
+++ b/kmod/src/ioctl.c
@@ -123,7 +123,7 @@ static long scoutfs_ioc_walk_inodes(struct file *file, unsigned long arg)
 
 	for (nr = 0; nr < walk.nr_entries; ) {
 
-		ret = scoutfs_item_next_same(sb, &key, &last_key, NULL, lock->end);
+		ret = scoutfs_item_next_same(sb, &key, &last_key, NULL, lock);
 		if (ret < 0 && ret != -ENOENT)
 			break;
 
diff --git a/kmod/src/item.c b/kmod/src/item.c
index dc2263fe..0b62b730 100644
--- a/kmod/src/item.c
+++ b/kmod/src/item.c
@@ -911,7 +911,7 @@ static struct cached_item *item_for_next(struct rb_root *root,
  */
 int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 		      struct scoutfs_key_buf *last, struct kvec *val,
-		      struct scoutfs_key_buf *end)
+		      struct scoutfs_lock *lock)
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 	struct item_cache *cac = sbi->item_cache;
@@ -923,8 +923,8 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 	int ret;
 
 	/* use the end key as the last key if it's closer to reduce compares */
-	if (end && scoutfs_key_compare(end, last) < 0)
-		last = end;
+	if (scoutfs_key_compare(lock->end, last) < 0)
+		last = lock->end;
 
 	/* convenience to avoid searching if caller iterates past their last */
 	if (scoutfs_key_compare(key, last) > 0) {
@@ -932,6 +932,11 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 		goto out;
 	}
 
+	if (WARN_ON_ONCE(!lock_coverage(lock, key, READ))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	pos = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
 	range_end = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
 	if (!pos || !range_end) {
@@ -948,13 +953,14 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 
 		cached = check_range(sb, &cac->ranges, pos, range_end);
 
 		trace_scoutfs_item_next_range_check(sb, !!cached, key,
-						    pos, last, end, range_end);
+						    pos, last, lock->end,
+						    range_end);
 
 		if (!cached) {
 			/* populate missing cached range starting at pos */
 			spin_unlock_irqrestore(&cac->lock, flags);
-			ret = scoutfs_manifest_read_items(sb, pos, end);
+			ret = scoutfs_manifest_read_items(sb, pos, lock->end);
 			spin_lock_irqsave(&cac->lock, flags);
 
 			if (ret)
@@ -1008,7 +1014,7 @@ int scoutfs_item_next_same_min(struct super_block *sb,
 			       struct scoutfs_key_buf *key,
 			       struct scoutfs_key_buf *last,
 			       struct kvec *val, int len,
-			       struct scoutfs_key_buf *end)
+			       struct scoutfs_lock *lock)
 {
 	int key_len = key->key_len;
 	int ret;
@@ -1018,7 +1024,7 @@ int scoutfs_item_next_same_min(struct super_block *sb,
 	if (WARN_ON_ONCE(!val || scoutfs_kvec_length(val) < len))
 		return -EINVAL;
 
-	ret = scoutfs_item_next(sb, key, last, val, end);
+	ret = scoutfs_item_next(sb, key, last, val, lock);
 	if (ret >= 0 && (key->key_len != key_len || ret < len))
 		ret = -EIO;
 
@@ -1033,14 +1039,14 @@ int scoutfs_item_next_same_min(struct super_block *sb,
  */
 int scoutfs_item_next_same(struct super_block *sb, struct scoutfs_key_buf *key,
 			   struct scoutfs_key_buf *last, struct kvec *val,
-			   struct scoutfs_key_buf *end)
+			   struct scoutfs_lock *lock)
 {
 	int key_len = key->key_len;
 	int ret;
 
 	trace_scoutfs_item_next_same(sb, key_len);
 
-	ret = scoutfs_item_next(sb, key, last, val, end);
+	ret = scoutfs_item_next(sb, key, last, val, lock);
 	if (ret >= 0 && (key->key_len != key_len))
 		ret = -EIO;
 
diff --git a/kmod/src/item.h b/kmod/src/item.h
index 0fd1eff9..8af12785 100644
--- a/kmod/src/item.h
+++ b/kmod/src/item.h
@@ -19,15 +19,15 @@ int scoutfs_item_lookup_exact(struct super_block *sb,
 			      int size, struct scoutfs_lock *lock);
 int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
 		      struct scoutfs_key_buf *last, struct kvec *val,
-		      struct scoutfs_key_buf *end);
+		      struct scoutfs_lock *lock);
 int scoutfs_item_next_same_min(struct super_block *sb,
 			       struct scoutfs_key_buf *key,
 			       struct scoutfs_key_buf *last,
 			       struct kvec *val, int len,
-			       struct scoutfs_key_buf *end);
+			       struct scoutfs_lock *lock);
 int scoutfs_item_next_same(struct super_block *sb, struct scoutfs_key_buf *key,
 			   struct scoutfs_key_buf *last, struct kvec *val,
-			   struct scoutfs_key_buf *end);
+			   struct scoutfs_lock *lock);
 int scoutfs_item_create(struct super_block *sb, struct scoutfs_key_buf *key,
 			struct kvec *val);
 int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key_buf *key,
diff --git a/kmod/src/xattr.c b/kmod/src/xattr.c
index f72b6746..72525470 100644
--- a/kmod/src/xattr.c
+++ b/kmod/src/xattr.c
@@ -392,7 +392,7 @@ ssize_t scoutfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	total = 0;
 
 	for (;;) {
-		ret = scoutfs_item_next(sb, key, last, NULL, lck->end);
+		ret = scoutfs_item_next(sb, key, last, NULL, lck);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = total;
@@ -474,7 +474,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino)
 
 	/* the inode is dead so we don't need the xattr sem */
 	for (;;) {
-		ret = scoutfs_item_next(sb, key, last, NULL, lck->end);
+		ret = scoutfs_item_next(sb, key, last, NULL, lck);
 		if (ret < 0) {
 			if (ret == -ENOENT)
 				ret = 0;
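
For illustration only (not part of the patch), a minimal caller-side sketch of
the new convention; peek_next_item() is a made-up example function, not a
scoutfs helper. The point is that callers now hand the whole scoutfs_lock to
scoutfs_item_next(), which clamps iteration to lock->end and, if the lock does
not cover the starting key, warns and returns -EINVAL instead of reading items
outside the locked range.

	/*
	 * Example only: look up the next item under a lock that covers
	 * [key, last].  Before this patch the final argument was
	 * lock->end; now the whole lock is passed so coverage can be
	 * verified inside scoutfs_item_next().
	 */
	static int peek_next_item(struct super_block *sb,
				  struct scoutfs_key_buf *key,
				  struct scoutfs_key_buf *last,
				  struct scoutfs_lock *lock)
	{
		int ret;

		ret = scoutfs_item_next(sb, key, last, NULL, lock);
		if (ret == -ENOENT)
			ret = 0;

		return ret;
	}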