From 67cc4fb6971639de44d211b806e5d0f2cdb37455 Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Wed, 12 Jul 2017 14:13:23 -0700
Subject: [PATCH] scoutfs: allow NULL end around read_items

Let both check_range and read_items take a NULL end. check_range just
doesn't do anything with the end of the range. read_items defaults to
trying to read as many items as it can but clamps to the extent of the
segments that intersect with the key.

This will let us incrementally add end arguments to the item functions.
Callers initially pass in NULL, and we'll switch them to real end keys
as we add lock coverage.

Signed-off-by: Zach Brown
---
 kmod/src/item.c     | 13 ++++++++-----
 kmod/src/manifest.c | 18 ++++++++++++------
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/kmod/src/item.c b/kmod/src/item.c
index 977ae63d..9dcc38c9 100644
--- a/kmod/src/item.c
+++ b/kmod/src/item.c
@@ -538,15 +538,18 @@ static bool check_range(struct super_block *sb, struct rb_root *root,
 
 	rng = walk_ranges(&cac->ranges, key, NULL, &next);
 	if (rng) {
-		scoutfs_key_copy(end, rng->end);
 		scoutfs_inc_counter(sb, item_range_hit);
+		if (end)
+			scoutfs_key_copy(end, rng->end);
 		return true;
 	}
 
-	if (next)
-		scoutfs_key_copy(end, next->start);
-	else
-		scoutfs_key_set_max(end);
+	if (end) {
+		if (next)
+			scoutfs_key_copy(end, next->start);
+		else
+			scoutfs_key_set_max(end);
+	}
 
 	scoutfs_inc_counter(sb, item_range_miss);
 	return false;
diff --git a/kmod/src/manifest.c b/kmod/src/manifest.c
index 7cad79cd..e8065724 100644
--- a/kmod/src/manifest.c
+++ b/kmod/src/manifest.c
@@ -539,9 +539,8 @@ out:
  * The caller found a hole in the item cache that they'd like populated.
  *
  * We search the manifest for all the segments we'll need to iterate
- * from the key to the end key. We walk the segments and insert as many
- * items as we can from the segments, trying to amortize the per-item
- * cost of segment searching.
+ * from the key to the end key. If the end key is null then we'll read
+ * as many items as the intersecting segments contain.
 *
 * As we insert the batch of items we give the item cache the range of
 * keys that contain these items. This lets the cache return negative
@@ -569,6 +568,7 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 	struct scoutfs_key_buf batch_end;
 	struct scoutfs_key_buf seg_end;
 	struct scoutfs_btree_root root;
+	struct scoutfs_inode_key junk;
 	SCOUTFS_DECLARE_KVEC(item_val);
 	SCOUTFS_DECLARE_KVEC(found_val);
 	struct scoutfs_segment *seg;
@@ -585,7 +585,14 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 	int err;
 	int cmp;
 
-	trace_scoutfs_read_items(sb, key, end);
+	if (end) {
+		scoutfs_key_clone(&seg_end, end);
+	} else {
+		scoutfs_key_init(&seg_end, &junk, sizeof(junk));
+		scoutfs_key_set_max(&seg_end);
+	}
+
+	trace_scoutfs_read_items(sb, key, &seg_end);
 
 	/*
@@ -599,7 +606,7 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 		goto out;
 
 	/* get refs on all the segments */
-	ret = get_manifest_refs(sb, &root, key, end, &ref_list);
+	ret = get_manifest_refs(sb, &root, key, &seg_end, &ref_list);
 	if (ret)
 		goto out;
 
@@ -642,7 +649,6 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 	 * those segments because other segments might overlap after
 	 * that.
 	 */
-	scoutfs_key_clone(&seg_end, end);
	list_for_each_entry(ref, &ref_list, entry) {
		if (ref->level > 0 &&
		    scoutfs_key_compare(ref->last, &seg_end) < 0) {
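
For callers picking up this change, the shape of the optional-end interface is
roughly the sketch below. It is a minimal, self-contained illustration of the
pattern the patch applies (write through the end pointer only when the caller
supplied one, otherwise default an internal key to the maximum value);
demo_key, demo_key_copy, demo_key_set_max, and demo_read_items are hypothetical
stand-ins for the scoutfs key helpers, not the actual kernel code.

/*
 * Standalone sketch of the "optional end key" pattern, built entirely
 * from hypothetical demo_* types and helpers.
 */
#include <stdio.h>
#include <limits.h>

struct demo_key {
	unsigned long long val;
};

static void demo_key_copy(struct demo_key *dst, const struct demo_key *src)
{
	dst->val = src->val;
}

static void demo_key_set_max(struct demo_key *key)
{
	key->val = ULLONG_MAX;
}

/*
 * Read items starting at @key.  If @end is NULL, fall back to an
 * on-stack key set to the maximum value so the rest of the function
 * can keep comparing against a real key, mirroring how read_items
 * only clamps to the segments that intersect the starting key.
 */
static int demo_read_items(const struct demo_key *key, const struct demo_key *end)
{
	struct demo_key seg_end;

	if (end)
		demo_key_copy(&seg_end, end);
	else
		demo_key_set_max(&seg_end);

	printf("reading items from %llu to %llu\n", key->val, seg_end.val);
	return 0;
}

int main(void)
{
	struct demo_key key = { .val = 10 };
	struct demo_key end = { .val = 100 };

	demo_read_items(&key, &end);	/* caller provides an end key */
	demo_read_items(&key, NULL);	/* NULL end: read as far as possible */
	return 0;
}

The same idea appears twice in the patch itself: check_range only writes
through the end pointer when it is non-NULL, and scoutfs_manifest_read_items
substitutes an on-stack seg_end set to the maximum key so its later segment
comparisons are unchanged.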