From 233fbb39f343d9e42555408a9083b8d2d06c91c5 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Wed, 27 Jul 2022 14:19:44 -0700 Subject: [PATCH] Limit alloc_move per-call allocator consumption Recently scoutfs_alloc_move() was changed to try and limit the amount of metadata blocks it could allocate or free. The intent was to stop concurrent holders of a transaction from fully consuming the available allocator for the transaction. The limiting logic was a bit off. It stopped when the allocator had the caller's limit remaining, not when it had consumed the caller's limit. This is overly permissive and could still allow concurrent callers to consume the allocator. It was also triggering warning messages when a call consumed more than its allowed budget while holding a transaction. Unfortunately, we don't have per-caller tracking of allocator resource consumption. The best we can do is sample the allocators as we start and return if they drop by the caller's limit. This is overly conservative in that it accounts any consumption during concurrent callers to all callers. This isn't perfect but it makes the failure case less likely and the impact shouldn't be significant. We don't often have a lot of concurrency and the limits are larger than callers will typically consume. Signed-off-by: Zach Brown --- kmod/src/alloc.c | 26 +++++++++++++++----------- kmod/src/alloc.h | 2 +- kmod/src/server.c | 4 ++-- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/kmod/src/alloc.c b/kmod/src/alloc.c index 0284150e..a36387b7 100644 --- a/kmod/src/alloc.c +++ b/kmod/src/alloc.c @@ -892,12 +892,11 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r * -ENOENT is returned if we run out of extents in the source tree * before moving the total. * - * If meta_reserved is non-zero then -EINPROGRESS can be returned if the - * current meta allocator's avail blocks or room for freed blocks would - * have fallen under the reserved amount. 
The could have been - * successfully dirtied in this case but the number of blocks moved is - * not returned. The caller is expected to deal with the partial - * progress by commiting the dirty trees and examining the resulting + * If meta_budget is non-zero then -EINPROGRESS can be returned if the + * caller's budget is consumed in the allocator during this call + * (though not necessarily by us, we don't have per-thread tracking of + * allocator consumption :/). The call can still have made progress and + * the caller is expected to commit the dirty trees and examine the resulting + * modified trees to see if they need to continue moving extents. * * The caller can specify that extents in the source tree should first @@ -914,7 +913,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc, struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *dst, struct scoutfs_alloc_root *src, u64 total, - __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved) + __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget) { struct alloc_ext_args args = { .alloc = alloc, @@ -922,6 +921,8 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc, }; struct scoutfs_extent found; struct scoutfs_extent ext; + u32 avail_start = 0; + u32 freed_start = 0; u64 moved = 0; u64 count; int ret = 0; @@ -932,6 +933,9 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc, vacant = NULL; } + if (meta_budget != 0) + scoutfs_alloc_meta_remaining(alloc, &avail_start, &freed_start); + while (moved < total) { count = total - moved; @@ -964,10 +968,10 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc, if (ret < 0) break; - if (meta_reserved != 0 && - scoutfs_alloc_meta_low(sb, alloc, meta_reserved + - extent_mod_blocks(src->root.height) + - extent_mod_blocks(dst->root.height))) { + if (meta_budget != 0 && + scoutfs_alloc_meta_low_since(alloc, avail_start, freed_start, meta_budget, 
+ extent_mod_blocks(src->root.height) + + extent_mod_blocks(dst->root.height))) { ret = -EINPROGRESS; break; } diff --git a/kmod/src/alloc.h b/kmod/src/alloc.h index 5e0d4ff3..4496802c 100644 --- a/kmod/src/alloc.h +++ b/kmod/src/alloc.h @@ -131,7 +131,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc, struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *dst, struct scoutfs_alloc_root *src, u64 total, - __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved); + __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget); int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc, struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root, u64 start, u64 len); diff --git a/kmod/src/server.c b/kmod/src/server.c index 32e99ecd..17f38481 100644 --- a/kmod/src/server.c +++ b/kmod/src/server.c @@ -694,13 +694,13 @@ static int alloc_move_refill_zoned(struct super_block *sb, struct scoutfs_alloc_ static int alloc_move_empty(struct super_block *sb, struct scoutfs_alloc_root *dst, - struct scoutfs_alloc_root *src, u64 meta_reserved) + struct scoutfs_alloc_root *src, u64 meta_budget) { DECLARE_SERVER_INFO(sb, server); return scoutfs_alloc_move(sb, &server->alloc, &server->wri, dst, src, le64_to_cpu(src->total_len), NULL, NULL, 0, - meta_reserved); + meta_budget); } /*