From 365048b785fd3405ede1e835680276ce5477e313 Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Thu, 5 Oct 2017 12:12:35 -0700
Subject: [PATCH] scoutfs: add full lock arg to _item_set_batch()

Add the full lock arg to _item_set_batch() so that it can verify lock
coverage.

Signed-off-by: Zach Brown
---
 kmod/src/item.c  | 7 ++++---
 kmod/src/item.h  | 2 +-
 kmod/src/xattr.c | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/kmod/src/item.c b/kmod/src/item.c
index 2ace7963..2b0665cc 100644
--- a/kmod/src/item.c
+++ b/kmod/src/item.c
@@ -1211,7 +1211,7 @@ out:
 int scoutfs_item_set_batch(struct super_block *sb, struct list_head *list,
			   struct scoutfs_key_buf *first,
			   struct scoutfs_key_buf *last, int sif,
-			   struct scoutfs_key_buf *end)
+			   struct scoutfs_lock *lock)
 {
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct item_cache *cac = sbi->item_cache;
@@ -1235,7 +1235,8 @@ int scoutfs_item_set_batch(struct super_block *sb, struct list_head *list,
	trace_scoutfs_item_set_batch(sb, first, last);

	if (WARN_ON_ONCE(scoutfs_key_compare(first, last) > 0) ||
-	    WARN_ON_ONCE(scoutfs_key_compare(end, last) < 0))
+	    WARN_ON_ONCE(!lock_coverage(lock, first, WRITE)) ||
+	    WARN_ON_ONCE(!lock_coverage(lock, last, WRITE)))
		return -EINVAL;

	range_end = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
@@ -1256,7 +1257,7 @@ int scoutfs_item_set_batch(struct super_block *sb, struct list_head *list,
	}
	spin_unlock_irqrestore(&cac->lock, flags);

-	ret = scoutfs_manifest_read_items(sb, range_end, end);
+	ret = scoutfs_manifest_read_items(sb, range_end, lock->end);

	spin_lock_irqsave(&cac->lock, flags);
	if (ret)
diff --git a/kmod/src/item.h b/kmod/src/item.h
index 6affdb81..ac64e280 100644
--- a/kmod/src/item.h
+++ b/kmod/src/item.h
@@ -49,7 +49,7 @@ int scoutfs_item_insert_batch(struct super_block *sb, struct list_head *list,
 int scoutfs_item_set_batch(struct super_block *sb, struct list_head *list,
			   struct scoutfs_key_buf *first,
			   struct scoutfs_key_buf *last, int sif,
-			   struct scoutfs_key_buf *end);
+			   struct scoutfs_lock *lock);
 void scoutfs_item_free_batch(struct super_block *sb, struct list_head *list);

 bool scoutfs_item_has_dirty(struct super_block *sb);
diff --git a/kmod/src/xattr.c b/kmod/src/xattr.c
index e26da49d..20978835 100644
--- a/kmod/src/xattr.c
+++ b/kmod/src/xattr.c
@@ -331,7 +331,7 @@ retry:
		goto unlock;

	ret = scoutfs_dirty_inode_item(inode, lck) ?:
-	      scoutfs_item_set_batch(sb, &list, key, last, sif, lck->end);
+	      scoutfs_item_set_batch(sb, &list, key, last, sif, lck);
	if (ret == 0) {
		/* XXX do these want i_mutex or anything? */
		inode_inc_iversion(inode);
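
For context, below is a minimal sketch of the kind of check lock_coverage()
is being asked to perform here: the keys must fall inside the lock's granted
key range and the lock mode must be strong enough for the requested access.
This is not the scoutfs implementation; only lock->end and
scoutfs_key_compare() appear in the patch above, so the start and mode
fields and the mode comparison are assumptions for illustration.

/*
 * Hypothetical sketch only: check that a key is covered by a lock's
 * key range and that the lock grants the requested mode.  The field
 * names start/end/mode are assumed, not the real scoutfs_lock layout.
 */
static bool sketch_lock_coverage(struct scoutfs_lock *lock,
				 struct scoutfs_key_buf *key, int mode)
{
	if (!lock || lock->mode < mode)
		return false;

	/* key must sort within [lock->start, lock->end] */
	return scoutfs_key_compare(lock->start, key) <= 0 &&
	       scoutfs_key_compare(key, lock->end) <= 0;
}

Passing the full struct scoutfs_lock instead of just lck->end lets
_item_set_batch() verify coverage of both first and last itself rather
than trusting the caller, while still handing lock->end to
scoutfs_manifest_read_items() as before.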