Limit alloc_move per-call allocator consumption

Recently scoutfs_alloc_move() was changed to try to limit the number
of metadata blocks it could allocate or free per call.  The intent was
to stop concurrent holders of a transaction from fully consuming the
transaction's available allocator.

The limiting logic was a bit off.  It stopped when the allocator had the
caller's limit remaining, not when the caller had consumed its limit.
That is overly permissive and could still let concurrent callers exhaust
the allocator.  It also triggered warning messages when a call consumed
more than its allowed budget while holding a transaction.
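
In rough pseudocode (meta_remaining() and start_remaining are stand-ins
for illustration, not the real helpers), the two checks differ like
this:

	/* old: bail when the allocator has only the budget remaining,
	 * which lets a single call consume everything above the budget */
	if (meta_remaining(alloc) <= meta_budget)
		return -EINPROGRESS;

	/* new: sample at entry and bail once the allocator has dropped
	 * by the budget since this call started */
	if (start_remaining - meta_remaining(alloc) >= meta_budget)
		return -EINPROGRESS;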

Unfortunately, we don't have per-caller tracking of allocator resource
consumption.  The best we can do is sample the allocators as we start
and return once they have dropped by the caller's limit.  This is overly
conservative in that it charges any consumption by concurrent callers
to every caller.
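
Given the call site in the diff below, the new
scoutfs_alloc_meta_low_since() check presumably looks something like
this; the argument order matches the diff but the body is a sketch, not
the actual implementation:

	static bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc,
						 u32 avail_start, u32 freed_start,
						 u64 budget, u32 needed)
	{
		u32 avail_now;
		u32 freed_now;

		/* sample the current avail blocks and room for freed blocks */
		scoutfs_alloc_meta_remaining(alloc, &avail_now, &freed_now);

		/* total drop since the caller's starting sample, charged
		 * to this caller even if concurrent callers consumed it,
		 * plus headroom needed for the next extent modification */
		return (u64)(avail_start - avail_now) +
		       (freed_start - freed_now) + needed >= budget;
	}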

This isn't perfect, but it makes the failure case less likely and the
impact shouldn't be significant.  We don't often have a lot of
concurrency, and the limits are larger than callers will typically
consume.

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown
2022-07-27 14:19:44 -07:00
parent 198d3cda32
commit 233fbb39f3
3 changed files with 18 additions and 14 deletions

@@ -892,12 +892,11 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
  * -ENOENT is returned if we run out of extents in the source tree
  * before moving the total.
  *
- * If meta_reserved is non-zero then -EINPROGRESS can be returned if the
- * current meta allocator's avail blocks or room for freed blocks would
- * have fallen under the reserved amount.  The could have been
- * successfully dirtied in this case but the number of blocks moved is
- * not returned.  The caller is expected to deal with the partial
- * progress by commiting the dirty trees and examining the resulting
+ * If meta_budget is non-zero then -EINPROGRESS can be returned if the
+ * caller's budget is consumed in the allocator during this call
+ * (though not necessarily by us, we don't have per-thread tracking of
+ * allocator consumption :/).  The call can still have made progress and
+ * the caller is expected to commit the dirty trees and examine the resulting
  * modified trees to see if they need to continue moving extents.
  *
  * The caller can specify that extents in the source tree should first
@@ -914,7 +913,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		       struct scoutfs_block_writer *wri,
 		       struct scoutfs_alloc_root *dst,
 		       struct scoutfs_alloc_root *src, u64 total,
-		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved)
+		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget)
 {
 	struct alloc_ext_args args = {
 		.alloc = alloc,
@@ -922,6 +921,8 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 	};
 	struct scoutfs_extent found;
 	struct scoutfs_extent ext;
+	u32 avail_start = 0;
+	u32 freed_start = 0;
 	u64 moved = 0;
 	u64 count;
 	int ret = 0;
@@ -932,6 +933,9 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		vacant = NULL;
 	}
 
+	if (meta_budget != 0)
+		scoutfs_alloc_meta_remaining(alloc, &avail_start, &freed_start);
+
 	while (moved < total) {
 		count = total - moved;
@@ -964,10 +968,10 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		if (ret < 0)
 			break;
 
-		if (meta_reserved != 0 &&
-		    scoutfs_alloc_meta_low(sb, alloc, meta_reserved +
-					   extent_mod_blocks(src->root.height) +
-					   extent_mod_blocks(dst->root.height))) {
+		if (meta_budget != 0 &&
+		    scoutfs_alloc_meta_low_since(alloc, avail_start, freed_start, meta_budget,
+						 extent_mod_blocks(src->root.height) +
+						 extent_mod_blocks(dst->root.height))) {
 			ret = -EINPROGRESS;
 			break;
 		}

@@ -131,7 +131,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 			struct scoutfs_block_writer *wri,
 			struct scoutfs_alloc_root *dst,
 			struct scoutfs_alloc_root *src, u64 total,
-			__le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved);
+			__le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget);
 int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
 			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
 			 u64 start, u64 len);

@@ -694,13 +694,13 @@ static int alloc_move_refill_zoned(struct super_block *sb, struct scoutfs_alloc_
 static int alloc_move_empty(struct super_block *sb,
 			    struct scoutfs_alloc_root *dst,
-			    struct scoutfs_alloc_root *src, u64 meta_reserved)
+			    struct scoutfs_alloc_root *src, u64 meta_budget)
 {
 	DECLARE_SERVER_INFO(sb, server);
 
 	return scoutfs_alloc_move(sb, &server->alloc, &server->wri,
 				  dst, src, le64_to_cpu(src->total_len), NULL, NULL, 0,
-				  meta_reserved);
+				  meta_budget);
 }
 
 /*
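
For completeness, a hedged sketch of the retry loop a caller of
alloc_move_empty() is expected to run; commit_server_trans() is a
hypothetical name standing in for whatever commits the dirty trees:

	/* keep emptying src into dst; -EINPROGRESS means the budget was
	 * consumed mid-call, so commit and re-examine the modified trees */
	do {
		ret = alloc_move_empty(sb, dst, src, meta_budget);
		if (ret == -EINPROGRESS)
			ret = commit_server_trans(sb); /* hypothetical */
	} while (ret == 0 && le64_to_cpu(src->total_len) > 0);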