From 2aa613dae537edd9b4c00014e23e14712e8f0474 Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Thu, 8 Mar 2018 15:19:07 -0800
Subject: [PATCH] scoutfs: add scoutfs_item_range_cached()

Add a quick helper for querying if a given range of keys is covered
by the item cache.

Signed-off-by: Zach Brown
---
 kmod/src/item.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++---------
 kmod/src/item.h |  3 +++
 2 files changed, 54 insertions(+), 9 deletions(-)

diff --git a/kmod/src/item.c b/kmod/src/item.c
index 41140ee2..a5714e2d 100644
--- a/kmod/src/item.c
+++ b/kmod/src/item.c
@@ -1683,6 +1683,19 @@ static struct cached_item *next_dirty(struct cached_item *item)
 	return NULL;
 }
 
+static bool dirty_item_within(struct rb_root *root,
+			      struct scoutfs_key_buf *from,
+			      struct scoutfs_key_buf *end)
+{
+	struct cached_item *item;
+
+	item = next_item(root, from);
+	if (item && !(item->dirty & ITEM_DIRTY))
+		item = next_dirty(item);
+
+	return item && scoutfs_key_compare(item->key, end) <= 0;
+}
+
 bool scoutfs_item_has_dirty(struct super_block *sb)
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -1697,6 +1710,41 @@ bool scoutfs_item_has_dirty(struct super_block *sb)
 	return has;
 }
 
+/*
+ * Return true if the item cache covers the given range. If dirty is
+ * provided then we only return true if there are dirty items in the
+ * range.
+ *
+ * If the start of the query range doesn't overlap a cached range then
+ * we see if the next cached range starts before the end of the query range.
+ */
+bool scoutfs_item_range_cached(struct super_block *sb,
+			       struct scoutfs_key_buf *start,
+			       struct scoutfs_key_buf *end, bool dirty)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	struct item_cache *cac = sbi->item_cache;
+	struct cached_range *next;
+	struct cached_range *rng;
+	unsigned long flags;
+	bool cached = false;
+
+	spin_lock_irqsave(&cac->lock, flags);
+
+	if (dirty) {
+		if (dirty_item_within(&cac->items, start, end))
+			cached = true;
+	} else {
+		rng = walk_ranges(&cac->ranges, start, NULL, &next);
+		if (rng || (next && scoutfs_key_compare(next->start, end) <= 0))
+			cached = true;
+	}
+
+	spin_unlock_irqrestore(&cac->lock, flags);
+
+	return cached;
+}
+
 /*
  * Returns true if adding more items with the given count, keys, and values
  * still fits in a single item along with the current dirty items.
@@ -1774,7 +1822,6 @@ int scoutfs_item_writeback(struct super_block *sb,
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 	struct item_cache *cac = sbi->item_cache;
-	struct cached_item *item;
 	unsigned long flags;
 	bool sync = false;
 	int count = 0;
@@ -1784,14 +1831,9 @@ int scoutfs_item_writeback(struct super_block *sb,
 
 	spin_lock_irqsave(&cac->lock, flags);
 
-	if (cac->nr_dirty_items) {
-		item = next_item(&cac->items, start);
-		if (item && !(item->dirty & ITEM_DIRTY))
-			item = next_dirty(item);
-		if (item && scoutfs_key_compare(item->key, end) <= 0) {
-			sync = true;
-			count = cac->nr_dirty_items;
-		}
+	if (cac->nr_dirty_items && dirty_item_within(&cac->items, start, end)) {
+		sync = true;
+		count = cac->nr_dirty_items;
 	}
 
 	spin_unlock_irqrestore(&cac->lock, flags);
diff --git a/kmod/src/item.h b/kmod/src/item.h
index 453ec5ed..7265c7d4 100644
--- a/kmod/src/item.h
+++ b/kmod/src/item.h
@@ -59,6 +59,9 @@ int scoutfs_item_set_batch(struct super_block *sb, struct list_head *list,
 void scoutfs_item_free_batch(struct super_block *sb, struct list_head *list);
 bool scoutfs_item_has_dirty(struct super_block *sb);
+bool scoutfs_item_range_cached(struct super_block *sb,
+			       struct scoutfs_key_buf *start,
+			       struct scoutfs_key_buf *end, bool dirty);
 bool scoutfs_item_dirty_fits_single(struct super_block *sb, u32 nr_items,
 				    u32 key_bytes, u32 val_bytes);
 int scoutfs_item_dirty_seg(struct super_block *sb,
 			   struct scoutfs_segment *seg);