Mirror of https://github.com/versity/scoutfs.git (last synced 2026-04-27 08:35:05 +00:00).
scoutfs: add full lock coverage to _item_next*()

Add the full lock argument to _item_next*() so that it can verify lock coverage, in addition to limiting item cache population to the range covered by the lock.

Signed-off-by: Zach Brown <zab@versity.com>
This commit is contained in:
@@ -638,7 +638,7 @@ int scoutfs_data_truncate_items(struct super_block *sb, u64 ino, u64 iblock,
|
||||
init_mapping_key(&key, &bmk, ino, iblock);
|
||||
scoutfs_kvec_init(val, map->encoded, sizeof(map->encoded));
|
||||
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val, lock->end);
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val, lock);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
@@ -881,7 +881,7 @@ static int find_free_segno(struct super_block *sb, u64 *segno)
|
||||
SCOUTFS_FREE_BITS_SEGNO_TYPE);
|
||||
scoutfs_kvec_init(val, &frb, sizeof(struct scoutfs_free_bits));
|
||||
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val, lock->end);
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val, lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
@@ -1296,8 +1296,7 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
init_mapping_key(&key, &bmk, ino, blk_off);
|
||||
scoutfs_kvec_init(val, &map->encoded, sizeof(map->encoded));
|
||||
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val,
|
||||
inode_lock->end);
|
||||
ret = scoutfs_item_next(sb, &key, &last_key, val, inode_lock);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
|
||||
@@ -379,7 +379,7 @@ static int scoutfs_readdir(struct file *file, void *dirent, filldir_t filldir)
|
||||
scoutfs_kvec_init(val, dent, item_len);
|
||||
ret = scoutfs_item_next_same_min(sb, &key, &last_key, val,
|
||||
offsetof(struct scoutfs_dirent, name[1]),
|
||||
dir_lock->end);
|
||||
dir_lock);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
@@ -1080,7 +1080,7 @@ static int add_next_linkref(struct super_block *sb, u64 ino,
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_item_next(sb, &key, &last, NULL, lock->end);
|
||||
ret = scoutfs_item_next(sb, &key, &last, NULL, lock);
|
||||
scoutfs_unlock(sb, lock, DLM_LOCK_PR);
|
||||
lock = NULL;
|
||||
|
||||
|
||||
@@ -1009,7 +1009,7 @@ int scoutfs_scan_orphans(struct super_block *sb)
|
||||
init_orphan_key(&last, &last_okey, sbi->node_id, ~0ULL);
|
||||
|
||||
while (1) {
|
||||
ret = scoutfs_item_next_same(sb, &key, &last, NULL, lock->end);
|
||||
ret = scoutfs_item_next_same(sb, &key, &last, NULL, lock);
|
||||
if (ret == -ENOENT) /* No more orphan items */
|
||||
break;
|
||||
if (ret < 0)
|
||||
|
||||
@@ -123,7 +123,7 @@ static long scoutfs_ioc_walk_inodes(struct file *file, unsigned long arg)
|
||||
|
||||
for (nr = 0; nr < walk.nr_entries; ) {
|
||||
|
||||
ret = scoutfs_item_next_same(sb, &key, &last_key, NULL, lock->end);
|
||||
ret = scoutfs_item_next_same(sb, &key, &last_key, NULL, lock);
|
||||
if (ret < 0 && ret != -ENOENT)
|
||||
break;
|
||||
|
||||
|
||||
@@ -911,7 +911,7 @@ static struct cached_item *item_for_next(struct rb_root *root,
|
||||
*/
|
||||
int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last, struct kvec *val,
|
||||
struct scoutfs_key_buf *end)
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct item_cache *cac = sbi->item_cache;
|
||||
@@ -923,8 +923,8 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
int ret;
|
||||
|
||||
/* use the end key as the last key if it's closer to reduce compares */
|
||||
if (end && scoutfs_key_compare(end, last) < 0)
|
||||
last = end;
|
||||
if (scoutfs_key_compare(lock->end, last) < 0)
|
||||
last = lock->end;
|
||||
|
||||
/* convenience to avoid searching if caller iterates past their last */
|
||||
if (scoutfs_key_compare(key, last) > 0) {
|
||||
@@ -932,6 +932,11 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(!lock_coverage(lock, key, READ))) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pos = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
|
||||
range_end = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
|
||||
if (!pos || !range_end) {
|
||||
@@ -948,13 +953,14 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
cached = check_range(sb, &cac->ranges, pos, range_end);
|
||||
|
||||
trace_scoutfs_item_next_range_check(sb, !!cached, key,
|
||||
pos, last, end, range_end);
|
||||
pos, last, lock->end,
|
||||
range_end);
|
||||
|
||||
if (!cached) {
|
||||
/* populate missing cached range starting at pos */
|
||||
spin_unlock_irqrestore(&cac->lock, flags);
|
||||
|
||||
ret = scoutfs_manifest_read_items(sb, pos, end);
|
||||
ret = scoutfs_manifest_read_items(sb, pos, lock->end);
|
||||
|
||||
spin_lock_irqsave(&cac->lock, flags);
|
||||
if (ret)
|
||||
@@ -1008,7 +1014,7 @@ int scoutfs_item_next_same_min(struct super_block *sb,
|
||||
struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last,
|
||||
struct kvec *val, int len,
|
||||
struct scoutfs_key_buf *end)
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
int key_len = key->key_len;
|
||||
int ret;
|
||||
@@ -1018,7 +1024,7 @@ int scoutfs_item_next_same_min(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(!val || scoutfs_kvec_length(val) < len))
|
||||
return -EINVAL;
|
||||
|
||||
ret = scoutfs_item_next(sb, key, last, val, end);
|
||||
ret = scoutfs_item_next(sb, key, last, val, lock);
|
||||
if (ret >= 0 && (key->key_len != key_len || ret < len))
|
||||
ret = -EIO;
|
||||
|
||||
@@ -1033,14 +1039,14 @@ int scoutfs_item_next_same_min(struct super_block *sb,
|
||||
*/
|
||||
int scoutfs_item_next_same(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last, struct kvec *val,
|
||||
struct scoutfs_key_buf *end)
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
int key_len = key->key_len;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_item_next_same(sb, key_len);
|
||||
|
||||
ret = scoutfs_item_next(sb, key, last, val, end);
|
||||
ret = scoutfs_item_next(sb, key, last, val, lock);
|
||||
if (ret >= 0 && (key->key_len != key_len))
|
||||
ret = -EIO;
|
||||
|
||||
|
||||
@@ -19,15 +19,15 @@ int scoutfs_item_lookup_exact(struct super_block *sb,
|
||||
int size, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_next(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last, struct kvec *val,
|
||||
struct scoutfs_key_buf *end);
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_item_next_same_min(struct super_block *sb,
|
||||
struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last,
|
||||
struct kvec *val, int len,
|
||||
struct scoutfs_key_buf *end);
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_item_next_same(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
struct scoutfs_key_buf *last, struct kvec *val,
|
||||
struct scoutfs_key_buf *end);
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_item_create(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
struct kvec *val);
|
||||
int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key_buf *key,
|
||||
|
||||
@@ -392,7 +392,7 @@ ssize_t scoutfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
||||
|
||||
total = 0;
|
||||
for (;;) {
|
||||
ret = scoutfs_item_next(sb, key, last, NULL, lck->end);
|
||||
ret = scoutfs_item_next(sb, key, last, NULL, lck);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = total;
|
||||
@@ -474,7 +474,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino)
|
||||
/* the inode is dead so we don't need the xattr sem */
|
||||
|
||||
for (;;) {
|
||||
ret = scoutfs_item_next(sb, key, last, NULL, lck->end);
|
||||
ret = scoutfs_item_next(sb, key, last, NULL, lck);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
|
||||
Reference in New Issue
Block a user