scoutfs: allow NULL end around read_items

Let both check_range and read_items take a NULL end.  With a NULL end,
check_range simply skips filling in the end of the range, and
read_items defaults to reading as many items as it can, clamped to the
extent of the segments that intersect with the key.

This will let us incrementally add end arguments to the item functions
as we add lock coverage; callers initially pass them in as NULL.
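
For illustration, the calling pattern this enables might look like the
following sketch (the function's shape is abbreviated from the hunks
below; lock_end and the read_around wrapper are hypothetical):

struct super_block;
struct scoutfs_key_buf;

/* assumed shape, abbreviated from the hunks below */
int scoutfs_manifest_read_items(struct super_block *sb,
				struct scoutfs_key_buf *key,
				struct scoutfs_key_buf *end);

static int read_around(struct super_block *sb, struct scoutfs_key_buf *key,
		       struct scoutfs_key_buf *lock_end)
{
	int ret;

	/* today: no lock coverage yet, read as far as the segments go */
	ret = scoutfs_manifest_read_items(sb, key, NULL);
	if (ret)
		return ret;

	/* later: the lock's covered range bounds the read */
	return scoutfs_manifest_read_items(sb, key, lock_end);
}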

Signed-off-by: Zach Brown <zab@versity.com>
commit 67cc4fb697
parent 11a8570117
Author: Zach Brown <zab@versity.com>
Date:   2017-07-12 14:13:23 -07:00

2 changed files with 20 additions and 11 deletions

@@ -538,15 +538,18 @@ static bool check_range(struct super_block *sb, struct rb_root *root,
 
 	rng = walk_ranges(&cac->ranges, key, NULL, &next);
 	if (rng) {
-		scoutfs_key_copy(end, rng->end);
 		scoutfs_inc_counter(sb, item_range_hit);
+		if (end)
+			scoutfs_key_copy(end, rng->end);
 		return true;
 	}
 
-	if (next)
-		scoutfs_key_copy(end, next->start);
-	else
-		scoutfs_key_set_max(end);
+	if (end) {
+		if (next)
+			scoutfs_key_copy(end, next->start);
+		else
+			scoutfs_key_set_max(end);
+	}
 
 	scoutfs_inc_counter(sb, item_range_miss);
 	return false;
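
Reduced to a standalone userspace toy, the NULL-end contract of
check_range looks like this (all of the names and types below are
stand-ins, not the scoutfs ones):

#include <stdbool.h>

struct key { unsigned long long v; };

static void key_copy(struct key *dst, const struct key *src)
{
	*dst = *src;
}

static void key_set_max(struct key *k)
{
	k->v = ~0ULL;
}

/*
 * Same shape as check_range above: the hit/miss result is unchanged
 * and *end is only written when the caller passed somewhere to put it.
 */
static bool check_range_toy(const struct key *rng_end,
			    const struct key *next_start, struct key *end)
{
	if (rng_end) {
		if (end)
			key_copy(end, rng_end);
		return true;		/* cached range hit */
	}

	if (end) {
		if (next_start)
			key_copy(end, next_start);
		else
			key_set_max(end);	/* no next range: open ended */
	}

	return false;			/* miss; *end bounds the hole */
}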

@@ -539,9 +539,8 @@ out:
  * The caller found a hole in the item cache that they'd like populated.
  *
  * We search the manifest for all the segments we'll need to iterate
- * from the key to the end key. We walk the segments and insert as many
- * items as we can from the segments, trying to amortize the per-item
- * cost of segment searching.
+ * from the key to the end key. If the end key is null then we'll read
+ * as many items as the intersecting segments contain.
  *
  * As we insert the batch of items we give the item cache the range of
  * keys that contain these items. This lets the cache return negative
@@ -569,6 +568,7 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 	struct scoutfs_key_buf batch_end;
 	struct scoutfs_key_buf seg_end;
 	struct scoutfs_btree_root root;
+	struct scoutfs_inode_key junk;
 	SCOUTFS_DECLARE_KVEC(item_val);
 	SCOUTFS_DECLARE_KVEC(found_val);
 	struct scoutfs_segment *seg;
@@ -585,7 +585,14 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 
 	int err;
 	int cmp;
 
-	trace_scoutfs_read_items(sb, key, end);
+	if (end) {
+		scoutfs_key_clone(&seg_end, end);
+	} else {
+		scoutfs_key_init(&seg_end, &junk, sizeof(junk));
+		scoutfs_key_set_max(&seg_end);
+	}
+
+	trace_scoutfs_read_items(sb, key, &seg_end);
 
 	/*
@@ -599,7 +606,7 @@ int scoutfs_manifest_read_items(struct super_block *sb,
goto out;
/* get refs on all the segments */
ret = get_manifest_refs(sb, &root, key, end, &ref_list);
ret = get_manifest_refs(sb, &root, key, &seg_end, &ref_list);
if (ret)
goto out;
@@ -642,7 +649,6 @@ int scoutfs_manifest_read_items(struct super_block *sb,
 	 * those segments because other segments might overlap after
 	 * that.
 	 */
-	scoutfs_key_clone(&seg_end, end);
 	list_for_each_entry(ref, &ref_list, entry) {
 		if (ref->level > 0 &&
 		    scoutfs_key_compare(ref->last, &seg_end) < 0) {
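
The seg_end defaulting can be sketched the same way (stand-in types
again; only the pointer-and-length shape of the key bufs, visible in
the hunk above, is assumed):

#include <string.h>

struct key_buf {
	void *data;
	unsigned int key_len;
};

static void key_init(struct key_buf *kb, void *data, unsigned int len)
{
	kb->data = data;
	kb->key_len = len;
}

static void key_clone(struct key_buf *dst, const struct key_buf *src)
{
	*dst = *src;				/* share the backing bytes */
}

static void key_set_max(struct key_buf *kb)
{
	memset(kb->data, 0xff, kb->key_len);	/* all-ones sorts last */
}

/*
 * The defaulting step from read_items: use the caller's end when given,
 * otherwise back seg_end with caller-provided scratch storage and set
 * it to the max key so the walk is bounded only by the segments that
 * intersect the starting key.
 */
static void default_seg_end(struct key_buf *seg_end,
			    const struct key_buf *end,
			    void *junk, unsigned int junk_len)
{
	if (end) {
		key_clone(seg_end, end);
	} else {
		key_init(seg_end, junk, junk_len);
		key_set_max(seg_end);
	}
}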