From 7b121d9860286e3372e9419405c5b6fd8700a01b Mon Sep 17 00:00:00 2001 From: Hunter Shaffer Date: Tue, 25 Feb 2025 16:27:07 -0500 Subject: [PATCH] small btree balancing While we are filling blocks the final block may not have enough items to properly fill that block. Here we add a check that stops filling the block if we have fewer than the minimum number of items. Signed-off-by: Hunter Shaffer --- utils/src/parallel_restore.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/utils/src/parallel_restore.c b/utils/src/parallel_restore.c index ecd84ca4..4a1ab3fd 100644 --- a/utils/src/parallel_restore.c +++ b/utils/src/parallel_restore.c @@ -1030,6 +1030,8 @@ static spr_err_t build_btree_block(struct scoutfs_parallel_restore_writer *wri, unsigned long val_align; unsigned long bytes; unsigned long nr; + unsigned long min_items; + long item_bytes_after_block; void *val_buf; spr_err_t err; u8 height; @@ -1083,10 +1085,16 @@ static spr_err_t build_btree_block(struct scoutfs_parallel_restore_writer *wri, for_each_bti_safe(&btb->items[level].root, bti, tmp) { val_align = round_up(bti->val_len, SCOUTFS_BTREE_VALUE_ALIGN); bytes = sizeof(struct scoutfs_btree_item) + val_align; + item_bytes_after_block = (le64_to_cpu(btb->total_items) * bytes) - le16_to_cpu(bt->mid_free_len); + min_items = (SCOUTFS_BLOCK_LG_SIZE - sizeof(struct scoutfs_btree_block)) / 4; if (le16_to_cpu(bt->mid_free_len) < bytes) break; + /* stop when there are not enough items to fill the next block */ + if (item_bytes_after_block > 0 && item_bytes_after_block < min_items) + break; + item->node.height = UNLINKED_AVL_HEIGHT; item->key = bti->key; item->seq = cpu_to_le64(1);