mirror of
https://github.com/versity/scoutfs.git
synced 2026-04-20 21:40:29 +00:00
scoutfs mkfs had two block writing functions: write_block to fill out some block header fields including crc calculation, and then write_block_raw to pwrite the raw buffer to the bytes in the device. These were used inconsistently as blocks came and went over time. Most callers filled out all the header fields themselves and called the raw writer. write_block was only used for super writing, which made sense because it clobbered the block's header with the super header, so the caller's set header magic and seq fields would be lost. This cleans up the mess. We now have only one block writer and the caller provides all the hdr fields. Everything uses it instead of filling out the fields themselves and calling the raw writer. Signed-off-by: Zach Brown <zab@versity.com>
88 lines
2.2 KiB
C
88 lines
2.2 KiB
C
#include <assert.h>
|
|
|
|
#include "sparse.h"
|
|
#include "util.h"
|
|
#include "format.h"
|
|
#include "key.h"
|
|
#include "avl.h"
|
|
#include "leaf_item_hash.h"
|
|
#include "btree.h"
|
|
|
|
static void init_block(struct scoutfs_btree_block *bt, int level)
|
|
{
|
|
int free;
|
|
|
|
free = SCOUTFS_BLOCK_LG_SIZE - sizeof(struct scoutfs_btree_block);
|
|
if (level == 0)
|
|
free -= SCOUTFS_BTREE_LEAF_ITEM_HASH_BYTES;
|
|
|
|
bt->level = level;
|
|
bt->mid_free_len = cpu_to_le16(free);
|
|
}
|
|
|
|
/*
|
|
* Point the root at the single leaf block that makes up a btree.
|
|
*/
|
|
void btree_init_root_single(struct scoutfs_btree_root *root,
|
|
struct scoutfs_btree_block *bt,
|
|
u64 seq, u64 blkno)
|
|
{
|
|
root->ref.blkno = cpu_to_le64(blkno);
|
|
root->ref.seq = cpu_to_le64(seq);
|
|
root->height = 1;
|
|
|
|
memset(bt, 0, SCOUTFS_BLOCK_LG_SIZE);
|
|
|
|
init_block(bt, 0);
|
|
}
|
|
|
|
static void *alloc_val(struct scoutfs_btree_block *bt, int len)
|
|
{
|
|
le16_add_cpu(&bt->mid_free_len, -len);
|
|
le16_add_cpu(&bt->total_item_bytes, len);
|
|
return (void *)bt + le16_to_cpu(bt->mid_free_len);
|
|
}
|
|
|
|
/*
|
|
* Add a sorted item after all the items in the block.
|
|
*
|
|
* We simply implement the special case of a wildly imbalanced avl tree.
|
|
* Mkfs only ever inserts a handful of items and they'll be rebalanced
|
|
* over time.
|
|
*/
|
|
void btree_append_item(struct scoutfs_btree_block *bt,
|
|
struct scoutfs_key *key, void *val, int val_len)
|
|
{
|
|
struct scoutfs_btree_item *item;
|
|
struct scoutfs_avl_node *prev;
|
|
void *val_buf;
|
|
|
|
item = &bt->items[le16_to_cpu(bt->nr_items)];
|
|
|
|
if (bt->nr_items) {
|
|
assert(scoutfs_key_compare(key, &(item - 1)->key) > 0);
|
|
prev = &(item - 1)->node;
|
|
|
|
item->node.height = prev->height++;
|
|
item->node.left = avl_node_off(&bt->item_root, prev);
|
|
prev->parent = avl_node_off(&bt->item_root, &item->node);
|
|
}
|
|
|
|
bt->item_root.node = avl_node_off(&bt->item_root, &item->node);
|
|
le16_add_cpu(&bt->nr_items, 1);
|
|
le16_add_cpu(&bt->mid_free_len,
|
|
-(u16)sizeof(struct scoutfs_btree_item));
|
|
le16_add_cpu(&bt->total_item_bytes, sizeof(struct scoutfs_btree_item));
|
|
|
|
item->key = *key;
|
|
leaf_item_hash_insert(bt, &item->key,
|
|
cpu_to_le16((void *)item - (void *)bt));
|
|
if (val_len == 0)
|
|
return;
|
|
|
|
val_buf = alloc_val(bt, val_len);
|
|
item->val_off = cpu_to_le16((void *)val_buf - (void *)bt);
|
|
item->val_len = cpu_to_le16(val_len);
|
|
memcpy(val_buf, val, val_len);
|
|
}
|