mirror of https://github.com/versity/scoutfs.git
synced 2026-01-04 11:24:21 +00:00
Update format to recent utils changes
The format was updated while implementing mkfs and print in
scoutfs-utils. Bring the kernel code up to speed.

For some reason I changed the name of the item length in the item
header struct. Who knows.

Signed-off-by: Zach Brown <zab@versity.com>
@@ -1,11 +1,136 @@
 #ifndef _SCOUTFS_FORMAT_H_
 #define _SCOUTFS_FORMAT_H_
 
-#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
+/* statfs(2) f_type */
+#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
+/* super block id */
+#define SCOUTFS_SUPER_ID 0x2e736674756f6373ULL /* "scoutfs." */
+
+/*
+ * Some fs structures are stored in smaller fixed size 4k bricks.
+ */
+#define SCOUTFS_BRICK_SHIFT 12
+#define SCOUTFS_BRICK_SIZE (1 << SCOUTFS_BRICK_SHIFT)
+
+/*
+ * A large block size reduces the amount of per-block overhead throughout
+ * the system: block IO, manifest communications and storage, etc.
+ */
+#define SCOUTFS_BLOCK_SHIFT 22
+#define SCOUTFS_BLOCK_SIZE (1 << SCOUTFS_BLOCK_SHIFT)
+
+/* for shifting between brick and block numbers */
+#define SCOUTFS_BLOCK_BRICK (SCOUTFS_BLOCK_SHIFT - SCOUTFS_BRICK_SHIFT)
+
+/*
+ * The super bricks leave a bunch of room at the start of the first
+ * block for platform structures like boot loaders.
+ */
+#define SCOUTFS_SUPER_BRICK 16
+
+/*
+ * This header is found at the start of every brick and block
+ * so that we can verify that it's what we were looking for.
+ */
+struct scoutfs_header {
+	__le32 crc;
+	__le64 fsid;
+	__le64 seq;
+	__le64 nr;
+} __packed;
+
+#define SCOUTFS_UUID_BYTES 16
+
+/*
+ * The super is stored in a pair of bricks in the first block.
+ */
+struct scoutfs_super {
+	struct scoutfs_header hdr;
+	__le64 id;
+	__u8 uuid[SCOUTFS_UUID_BYTES];
+	__le64 total_blocks;
+	__le64 ring_layout_block;
+	__le64 ring_layout_seq;
+	__le64 last_ring_brick;
+	__le64 last_ring_seq;
+	__le64 last_block_seq;
+} __packed;
+
+/*
+ * We should be able to make the offset smaller if neither dirents nor
+ * data items use the full 64 bits.
+ */
+struct scoutfs_key {
+	__le64 inode;
+	u8 type;
+	__le64 offset;
+} __packed;
+
+#define SCOUTFS_ROOT_INO 1
+
+#define SCOUTFS_INODE_KEY 128
+#define SCOUTFS_DIRENT_KEY 192
+
+struct scoutfs_ring_layout {
+	struct scoutfs_header hdr;
+	__le32 nr_blocks;
+	__le64 blocks[0];
+} __packed;
+
+struct scoutfs_ring_entry {
+	u8 type;
+	__le16 len;
+} __packed;
+
+/*
+ * Ring bricks are 4k bricks stored inside the large ring blocks
+ * referenced by the ring descriptor block.
+ *
+ * The manifest entries describe the position of a given block in the
+ * manifest. They're keyed by the block number so that we can log
+ * movement of a block in the manifest with one log entry and we can log
+ * deletion with just the block number.
+ */
+struct scoutfs_ring_brick {
+	struct scoutfs_header hdr;
+	__le16 nr_entries;
+} __packed;
+
+enum {
+	SCOUTFS_RING_REMOVE_MANIFEST = 0,
+	SCOUTFS_RING_ADD_MANIFEST,
+	SCOUTFS_RING_BITMAP,
+};
+
+/*
+ * Manifest entries are logged by their block number. This lets us log
+ * a change with one entry and a removal with a tiny block number
+ * without the key.
+ */
+struct scoutfs_ring_remove_manifest {
+	__le64 block;
+} __packed;
+
+/*
+ * Including both keys might make the manifest too large. It might be
+ * better to only include one key and infer a block's range from the
+ * neighbour's key. The downside of that is that we assume that there
+ * isn't unused key space between blocks in a level. We might search
+ * blocks when we didn't need to.
+ */
+struct scoutfs_ring_add_manifest {
+	__le64 block;
+	__le64 seq;
+	__u8 level;
+	struct scoutfs_key first;
+	struct scoutfs_key last;
+} __packed;
+
+struct scoutfs_ring_bitmap {
+	__le32 offset;
+	__le64 bits[2];
+} __packed;
+
 /*
  * This bloom size is chosen to have a roughly 1% false positive rate
  * for ~90k items which is roughly the worst case for a block full of
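With SCOUTFS_BRICK_SHIFT 12 and SCOUTFS_BLOCK_SHIFT 22, SCOUTFS_BLOCK_BRICK works out to 10, so each 4MB block holds 1024 4k bricks, and SCOUTFS_SUPER_BRICK 16 lands the super bricks 64k into the first block. A minimal sketch of the shift arithmetic the "for shifting between brick and block numbers" comment describes, using hypothetical helper names that are not in the patch:

#include <linux/types.h>

/* Hypothetical helpers, not from the patch: convert between brick and
 * block numbers using the shift difference defined above. */
static inline u64 scoutfs_brick_to_block(u64 brick)
{
	/* 2^10 = 1024 bricks per block */
	return brick >> SCOUTFS_BLOCK_BRICK;
}

static inline u64 scoutfs_block_to_first_brick(u64 blkno)
{
	return blkno << SCOUTFS_BLOCK_BRICK;
}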
@@ -18,20 +143,8 @@
 #define SCOUTFS_BLOOM_INDEX_MASK ((1 << SCOUTFS_BLOOM_INDEX_BITS) - 1)
 #define SCOUTFS_BLOOM_INDEX_NR 7
 
-/*
- * We should be able to make the offset smaller if neither dirents nor
- * data items use the full 64 bits.
- */
-struct scoutfs_key {
-	__le64 inode;
-	u8 type;
-	__le64 offset;
-} __packed;
-
-#define SCOUTFS_INODE_KEY 128
-#define SCOUTFS_DIRENT_KEY 192
-
 struct scoutfs_lsm_block {
 	struct scoutfs_header hdr;
 	struct scoutfs_key first;
 	struct scoutfs_key last;
 	__le32 nr_items;
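The bloom constants above suggest carving SCOUTFS_BLOOM_INDEX_NR bit positions out of a hash, SCOUTFS_BLOOM_INDEX_BITS at a time. A sketch under that assumption; the actual derivation is not shown in this hunk:

/* Assumption, not from the patch: derive the 7 bloom bit positions by
 * masking successive SCOUTFS_BLOOM_INDEX_BITS-wide chunks off a 64-bit
 * hash of the key. */
static void scoutfs_bloom_indexes(u64 hash, u32 *inds)
{
	int i;

	for (i = 0; i < SCOUTFS_BLOOM_INDEX_NR; i++) {
		inds[i] = hash & SCOUTFS_BLOOM_INDEX_MASK;
		hash >>= SCOUTFS_BLOOM_INDEX_BITS;
	}
}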
@@ -41,10 +154,9 @@ struct scoutfs_lsm_block {
 
 struct scoutfs_item_header {
 	struct scoutfs_key key;
-	__le16 val_len;
+	__le16 len;
 } __packed;
-
 
 struct scoutfs_timespec {
	__le64 sec;
	__le32 nsec;
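The rename from val_len to len doesn't change the layout; the field still counts the value bytes, as the assignment in the next hunk (ihdr.len = cpu_to_le16(item->val_len)) shows. A sketch, with a hypothetical helper name, of the space one item occupies in an lsm block under that reading:

/* Hypothetical helper, not from the patch: an item occupies its packed
 * header plus the value payload that len describes. */
static inline size_t scoutfs_item_bytes(struct scoutfs_item_header *ihdr)
{
	return sizeof(struct scoutfs_item_header) + le16_to_cpu(ihdr->len);
}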
@@ -236,7 +236,7 @@ static struct scoutfs_item *dirty_block_pages(struct super_block *sb,
 	trace_printk("item %p key "CKF"\n", item, CKA(&item->key));
 
 	ihdr.key = item->key;
-	ihdr.val_len = cpu_to_le16(item->val_len);
+	ihdr.len = cpu_to_le16(item->val_len);
 	ret = copy_to_pages(&pgs, &ihdr, sizeof(ihdr));
 	if (ret > 0)
 		ret = copy_to_pages(&pgs, item->val, item->val_len);
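One consequence of the key layout worth noting: struct scoutfs_key stores inode and offset as little-endian, so keys can't be memcmp()ed for sort order. A comparison sketch, assuming precedence of inode, then type, then offset (implied by the field order, not confirmed by this commit):

/* Sketch, not from the patch: order keys by inode, then type, then
 * offset, converting the little-endian fields before comparing. */
static int scoutfs_key_cmp(const struct scoutfs_key *a,
			   const struct scoutfs_key *b)
{
	u64 a_ino = le64_to_cpu(a->inode);
	u64 b_ino = le64_to_cpu(b->inode);
	u64 a_off = le64_to_cpu(a->offset);
	u64 b_off = le64_to_cpu(b->offset);

	if (a_ino != b_ino)
		return a_ino < b_ino ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a_off != b_off)
		return a_off < b_off ? -1 : 1;
	return 0;
}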