mirror of https://github.com/versity/scoutfs.git (synced 2026-01-07 04:26:29 +00:00)
Block API offers scoutfs_block instead of bh
Our thin block wrappers exposed buffer heads to all the callers. We're about to revert back to the block interface that uses its own scoutfs_block struct instead of buffer heads. Let's reduce the churn of that patch by first having the block API give callers an opaque struct scoutfs_block. Internally it's still buffer heads, but the callers don't know that. scoutfs_write_dirty_super() is the exception that has magical knowledge of buffer heads. That's fixed once the new block API offers a function for writing a single block.

Signed-off-by: Zach Brown <zab@versity.com>
Reviewed-by: Mark Fasheh <mfasheh@versity.com>
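The intent of the change is easiest to see from a caller's side: after this patch a caller reads a block, maps its contents through scoutfs_block_data(), and drops it with scoutfs_block_put(), without ever touching buffer_head fields. Below is a minimal sketch of that calling convention; the helper print_block_seq() is made up for illustration, while the scoutfs_block_* calls and the header fields are the ones the diff below converts to.

/*
 * Hypothetical caller, not part of the patch: shows the opaque-handle
 * calling convention that the converted block API exposes.
 */
static int print_block_seq(struct super_block *sb, u64 blkno)
{
	struct scoutfs_block *bl;
	struct scoutfs_block_header *hdr;

	/* an opaque handle is returned instead of a buffer_head */
	bl = scoutfs_block_read(sb, blkno);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	/* map the block contents; internally this is still bh->b_data */
	hdr = scoutfs_block_data(bl);
	trace_printk("blkno %llu seq %llu\n",
		     le64_to_cpu(hdr->blkno), le64_to_cpu(hdr->seq));

	/* drops the reference on the underlying buffer head */
	scoutfs_block_put(bl);
	return 0;
}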
126 kmod/src/block.c
@@ -38,6 +38,8 @@
* - should invalidate dirty blocks if freed
*/

struct scoutfs_block;

struct block_bh_private {
struct super_block *sb;
struct buffer_head *bh;
@@ -183,7 +185,7 @@ static void erase_bhp(struct buffer_head *bh)
* Read an existing block from the device and verify its metadata header.
* The buffer head is returned unlocked and uptodate.
*/
struct buffer_head *scoutfs_block_read(struct super_block *sb, u64 blkno)
struct scoutfs_block *scoutfs_block_read(struct super_block *sb, u64 blkno)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct buffer_head *bh;
@@ -206,13 +208,13 @@ struct buffer_head *scoutfs_block_read(struct super_block *sb, u64 blkno)
}
unlock_buffer(bh);
if (ret < 0) {
scoutfs_block_put(bh);
scoutfs_block_put((void *)bh);
bh = ERR_PTR(ret);
}
}

out:
return bh;
return (void *)bh;
}

/*
@@ -228,23 +230,23 @@ out:
* - reads that span transactions?
* - writers creating a new dirty block?
*/
struct buffer_head *scoutfs_block_read_ref(struct super_block *sb,
struct scoutfs_block_ref *ref)
struct scoutfs_block *scoutfs_block_read_ref(struct super_block *sb,
struct scoutfs_block_ref *ref)
{
struct scoutfs_block_header *hdr;
struct buffer_head *bh;
struct scoutfs_block *bl;

bh = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (!IS_ERR(bh)) {
hdr = bh_data(bh);
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (!IS_ERR(bl)) {
hdr = scoutfs_block_data(bl);
if (WARN_ON_ONCE(hdr->seq != ref->seq)) {
clear_buffer_uptodate(bh);
brelse(bh);
bh = ERR_PTR(-EAGAIN);
clear_buffer_uptodate(bl);
scoutfs_block_put(bl);
bl = ERR_PTR(-EAGAIN);
}
}

return bh;
return bl;
}

/*
@@ -309,7 +311,7 @@ int scoutfs_block_write_dirty(struct super_block *sb)
spin_unlock_irqrestore(&sbi->block_lock, flags);

atomic_inc(&sbi->block_writes);
scoutfs_block_set_crc(bh);
scoutfs_block_set_crc((void *)bh);

lock_buffer(bh);

@@ -357,38 +359,40 @@ int scoutfs_block_has_dirty(struct super_block *sb)
* Callers are responsible for serializing modification to the reference
* which is probably embedded in some other dirty persistent structure.
*/
struct buffer_head *scoutfs_block_dirty_ref(struct super_block *sb,
struct scoutfs_block_ref *ref)
struct scoutfs_block *scoutfs_block_dirty_ref(struct super_block *sb,
struct scoutfs_block_ref *ref)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_block_header *hdr;
struct buffer_head *copy_bh = NULL;
struct buffer_head *bh;
struct scoutfs_block *copy_bl = NULL;
struct scoutfs_block *bl;
u64 blkno = 0;
int ret;
int err;

bh = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (IS_ERR(bh) || ref->seq == sbi->super.hdr.seq)
return bh;
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (IS_ERR(bl) || ref->seq == sbi->super.hdr.seq)
return bl;

ret = scoutfs_buddy_alloc_same(sb, &blkno, le64_to_cpu(ref->blkno));
if (ret < 0)
goto out;

copy_bh = scoutfs_block_dirty(sb, blkno);
if (IS_ERR(copy_bh)) {
ret = PTR_ERR(copy_bh);
copy_bl = scoutfs_block_dirty(sb, blkno);
if (IS_ERR(copy_bl)) {
ret = PTR_ERR(copy_bl);
goto out;
}

ret = scoutfs_buddy_free(sb, ref->seq, bh->b_blocknr, 0);
hdr = scoutfs_block_data(bl);
ret = scoutfs_buddy_free(sb, hdr->seq, le64_to_cpu(hdr->blkno), 0);
if (ret)
goto out;

memcpy(copy_bh->b_data, bh->b_data, SCOUTFS_BLOCK_SIZE);
memcpy(scoutfs_block_data(copy_bl), scoutfs_block_data(bl),
SCOUTFS_BLOCK_SIZE);

hdr = bh_data(copy_bh);
hdr = scoutfs_block_data(copy_bl);
hdr->blkno = cpu_to_le64(blkno);
hdr->seq = sbi->super.hdr.seq;
ref->blkno = hdr->blkno;
@@ -396,18 +400,18 @@ struct buffer_head *scoutfs_block_dirty_ref(struct super_block *sb,

ret = 0;
out:
scoutfs_block_put(bh);
scoutfs_block_put(bl);
if (ret) {
if (!IS_ERR_OR_NULL(copy_bh)) {
if (!IS_ERR_OR_NULL(copy_bl)) {
err = scoutfs_buddy_free(sb, sbi->super.hdr.seq,
copy_bh->b_blocknr, 0);
blkno, 0);
WARN_ON_ONCE(err); /* freeing dirty must work */
}
scoutfs_block_put(copy_bh);
copy_bh = ERR_PTR(ret);
scoutfs_block_put(copy_bl);
copy_bl = ERR_PTR(ret);
}

return copy_bh;
return copy_bl;
}

/*
@@ -415,7 +419,7 @@ out:
* the current dirty seq. Callers are responsible for serializing
* access to the block and for zeroing unwritten block contents.
*/
struct buffer_head *scoutfs_block_dirty(struct super_block *sb, u64 blkno)
struct scoutfs_block *scoutfs_block_dirty(struct super_block *sb, u64 blkno)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_block_header *hdr;
@@ -431,12 +435,12 @@ struct buffer_head *scoutfs_block_dirty(struct super_block *sb, u64 blkno)

ret = insert_bhp(sb, bh);
if (ret < 0) {
scoutfs_block_put(bh);
scoutfs_block_put((void *)bh);
bh = ERR_PTR(ret);
goto out;
}

hdr = bh_data(bh);
hdr = scoutfs_block_data((void *)bh);
*hdr = sbi->super.hdr;
hdr->blkno = cpu_to_le64(blkno);
hdr->seq = sbi->super.hdr.seq;
@@ -444,18 +448,18 @@ struct buffer_head *scoutfs_block_dirty(struct super_block *sb, u64 blkno)
set_buffer_uptodate(bh);
set_buffer_scoutfs_verified(bh);
out:
return bh;
return (void *)bh;
}

/*
* Allocate a new dirty writable block. The caller must be in a
* transaction so that we can assign the dirty seq.
*/
struct buffer_head *scoutfs_block_dirty_alloc(struct super_block *sb)
struct scoutfs_block *scoutfs_block_dirty_alloc(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->stable_super;
struct buffer_head *bh;
struct scoutfs_block *bl;
u64 blkno;
int ret;
int err;
@@ -464,12 +468,12 @@ struct buffer_head *scoutfs_block_dirty_alloc(struct super_block *sb)
if (ret < 0)
return ERR_PTR(ret);

bh = scoutfs_block_dirty(sb, blkno);
if (IS_ERR(bh)) {
bl = scoutfs_block_dirty(sb, blkno);
if (IS_ERR(bl)) {
err = scoutfs_buddy_free(sb, super->hdr.seq, blkno, 0);
WARN_ON_ONCE(err); /* freeing dirty must work */
}
return bh;
return bl;
}

/*
@@ -495,9 +499,9 @@ void scoutfs_block_forget(struct super_block *sb, u64 blkno)
}
}

void scoutfs_block_set_crc(struct buffer_head *bh)
void scoutfs_block_set_crc(struct scoutfs_block *bl)
{
struct scoutfs_block_header *hdr = bh_data(bh);
struct scoutfs_block_header *hdr = scoutfs_block_data(bl);

hdr->crc = cpu_to_le32(scoutfs_crc_block(hdr));
}
@@ -505,26 +509,29 @@ void scoutfs_block_set_crc(struct buffer_head *bh)
/*
* Zero the block from the given byte to the end of the block.
*/
void scoutfs_block_zero(struct buffer_head *bh, size_t off)
void scoutfs_block_zero(struct scoutfs_block *bl, size_t off)
{
if (WARN_ON_ONCE(off > SCOUTFS_BLOCK_SIZE))
return;

if (off < SCOUTFS_BLOCK_SIZE)
memset((char *)bh->b_data + off, 0, SCOUTFS_BLOCK_SIZE - off);
memset(scoutfs_block_data(bl) + off, 0,
SCOUTFS_BLOCK_SIZE - off);
}

/*
* Zero the block from the given byte to the end of the block.
*/
void scoutfs_block_zero_from(struct buffer_head *bh, void *ptr)
void scoutfs_block_zero_from(struct scoutfs_block *bl, void *ptr)
{
return scoutfs_block_zero(bh, (char *)ptr - (char *)bh->b_data);
return scoutfs_block_zero(bl, (char *)ptr -
(char *)scoutfs_block_data(bl));
}

void scoutfs_block_set_lock_class(struct buffer_head *bh,
void scoutfs_block_set_lock_class(struct scoutfs_block *bl,
struct lock_class_key *class)
{
struct buffer_head *bh = (void *)bl;
struct block_bh_private *bhp = bh->b_private;

if (bhp && !bhp->rwsem_class) {
@@ -533,8 +540,9 @@ void scoutfs_block_set_lock_class(struct buffer_head *bh,
}
}

void scoutfs_block_lock(struct buffer_head *bh, bool write, int subclass)
void scoutfs_block_lock(struct scoutfs_block *bl, bool write, int subclass)
{
struct buffer_head *bh = (void *)bl;
struct block_bh_private *bhp = bh->b_private;

if (bhp) {
@@ -545,8 +553,9 @@ void scoutfs_block_lock(struct buffer_head *bh, bool write, int subclass)
}
}

void scoutfs_block_unlock(struct buffer_head *bh, bool write)
void scoutfs_block_unlock(struct scoutfs_block *bl, bool write)
{
struct buffer_head *bh = (void *)bl;
struct block_bh_private *bhp = bh->b_private;

if (bhp) {
@@ -556,3 +565,18 @@ void scoutfs_block_unlock(struct buffer_head *bh, bool write)
up_read(&bhp->rwsem);
}
}

void *scoutfs_block_data(struct scoutfs_block *bl)
{
struct buffer_head *bh = (void *)bl;

return (void *)bh->b_data;
}

void scoutfs_block_put(struct scoutfs_block *bl)
{
struct buffer_head *bh = (void *)bl;

if (!IS_ERR_OR_NULL(bh))
brelse(bh);
}
@@ -1,42 +1,34 @@
#ifndef _SCOUTFS_BLOCK_H_
#define _SCOUTFS_BLOCK_H_

#include <linux/fs.h>
#include <linux/buffer_head.h>
struct scoutfs_block;

struct buffer_head *scoutfs_block_read(struct super_block *sb, u64 blkno);
struct buffer_head *scoutfs_block_read_ref(struct super_block *sb,
#include <linux/fs.h>

struct scoutfs_block *scoutfs_block_read(struct super_block *sb, u64 blkno);
struct scoutfs_block *scoutfs_block_read_ref(struct super_block *sb,
struct scoutfs_block_ref *ref);

struct buffer_head *scoutfs_block_dirty(struct super_block *sb, u64 blkno);
struct buffer_head *scoutfs_block_dirty_alloc(struct super_block *sb);
struct buffer_head *scoutfs_block_dirty_ref(struct super_block *sb,
struct scoutfs_block *scoutfs_block_dirty(struct super_block *sb, u64 blkno);
struct scoutfs_block *scoutfs_block_dirty_alloc(struct super_block *sb);
struct scoutfs_block *scoutfs_block_dirty_ref(struct super_block *sb,
struct scoutfs_block_ref *ref);

int scoutfs_block_has_dirty(struct super_block *sb);
int scoutfs_block_write_dirty(struct super_block *sb);

void scoutfs_block_set_crc(struct buffer_head *bh);
void scoutfs_block_zero(struct buffer_head *bh, size_t off);
void scoutfs_block_zero_from(struct buffer_head *bh, void *ptr);
void scoutfs_block_set_crc(struct scoutfs_block *bl);
void scoutfs_block_zero(struct scoutfs_block *bl, size_t off);
void scoutfs_block_zero_from(struct scoutfs_block *bl, void *ptr);

void scoutfs_block_set_lock_class(struct buffer_head *bh,
void scoutfs_block_set_lock_class(struct scoutfs_block *bl,
struct lock_class_key *class);
void scoutfs_block_lock(struct buffer_head *bh, bool write, int subclass);
void scoutfs_block_unlock(struct buffer_head *bh, bool write);
void scoutfs_block_lock(struct scoutfs_block *bl, bool write, int subclass);
void scoutfs_block_unlock(struct scoutfs_block *bl, bool write);

void scoutfs_block_forget(struct super_block *sb, u64 blkno);

/* XXX seems like this should be upstream :) */
static inline void *bh_data(struct buffer_head *bh)
{
return (void *)bh->b_data;
}

static inline void scoutfs_block_put(struct buffer_head *bh)
{
if (!IS_ERR_OR_NULL(bh))
brelse(bh);
}
void *scoutfs_block_data(struct scoutfs_block *bl);
void scoutfs_block_put(struct scoutfs_block *bl);

#endif
337 kmod/src/btree.c
@@ -58,7 +58,7 @@
|
||||
* XXX
|
||||
* - do we want a level in the btree header? seems like we would?
|
||||
* - validate structures on read?
|
||||
* - internal bh/pos/cmp interface is clumsy..
|
||||
* - internal bl/pos/cmp interface is clumsy..
|
||||
*/
|
||||
|
||||
/* number of contiguous bytes used by the item header and val of given len */
|
||||
@@ -451,14 +451,10 @@ static void compact_items(struct scoutfs_btree_block *bt)
|
||||
* consistent tree order.
|
||||
*
|
||||
* The cow tree updates let us skip block locking entirely for stable
|
||||
* blocks because they're read only. The block layer only has to worry
|
||||
* about locking blocks that could be written to. While they're
|
||||
* writable they have a buffer_head private that pins them in the
|
||||
* transaction and we store the block lock there. The block layer
|
||||
* ignores our locking attempts for read-only blocks.
|
||||
*
|
||||
* And all of the blocks referenced by the stable super will be stable
|
||||
* so we only try to lock at all when working with the dirty super.
|
||||
* blocks because they're read only. All the blocks in the stable
|
||||
* super tree are stable so we don't have to lock that tree at all.
|
||||
* We let the block layer use the header's seq to avoid locking
|
||||
* stable blocks.
|
||||
*
|
||||
* lockdep has to not be freaked out by all of this. The cascading
|
||||
* block locks really make it angry without annotation so we add classes
|
||||
@@ -466,24 +462,24 @@ static void compact_items(struct scoutfs_btree_block *bt)
|
||||
* during merge.
|
||||
*/
|
||||
|
||||
static void set_block_lock_class(struct buffer_head *bh, int level)
|
||||
static void set_block_lock_class(struct scoutfs_block *bl, int level)
|
||||
{
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
static struct lock_class_key tree_depth_classes[SCOUTFS_BTREE_MAX_DEPTH];
|
||||
|
||||
scoutfs_block_set_lock_class(bh, &tree_depth_classes[level]);
|
||||
scoutfs_block_set_lock_class(bl, &tree_depth_classes[level]);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void lock_tree_block(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct buffer_head *bh, bool write, int subclass)
|
||||
struct scoutfs_block *bl, bool write, int subclass)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (root == &sbi->super.btree_root) {
|
||||
if (bh) {
|
||||
scoutfs_block_lock(bh, write, subclass);
|
||||
if (bl) {
|
||||
scoutfs_block_lock(bl, write, subclass);
|
||||
} else {
|
||||
if (write)
|
||||
down_write(&sbi->btree_rwsem);
|
||||
@@ -495,13 +491,13 @@ static void lock_tree_block(struct super_block *sb,
|
||||
|
||||
static void unlock_tree_block(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct buffer_head *bh, bool write)
|
||||
struct scoutfs_block *bl, bool write)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (root == &sbi->super.btree_root) {
|
||||
if (bh) {
|
||||
scoutfs_block_unlock(bh, write);
|
||||
if (bl) {
|
||||
scoutfs_block_unlock(bl, write);
|
||||
} else {
|
||||
if (write)
|
||||
up_write(&sbi->btree_rwsem);
|
||||
@@ -512,39 +508,39 @@ static void unlock_tree_block(struct super_block *sb,
|
||||
}
|
||||
|
||||
/* sorting relies on masking pointers to find the containing block */
|
||||
static inline struct buffer_head *check_bh_alignment(struct buffer_head *bh)
|
||||
static inline struct scoutfs_block *check_bl_alignment(struct scoutfs_block *bl)
|
||||
{
|
||||
if (!IS_ERR_OR_NULL(bh)) {
|
||||
struct scoutfs_btree_block *bt = bh_data(bh);
|
||||
if (!IS_ERR_OR_NULL(bl)) {
|
||||
struct scoutfs_btree_block *bt = scoutfs_block_data(bl);
|
||||
|
||||
if (WARN_ON_ONCE(aligned_bt(bt) != bt)) {
|
||||
scoutfs_block_put(bh);
|
||||
scoutfs_block_put(bl);
|
||||
return ERR_PTR(-EIO);
|
||||
}
|
||||
}
|
||||
|
||||
return bh;
|
||||
return bl;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate and initialize a new tree block. The caller adds references
|
||||
* to it.
|
||||
*/
|
||||
static struct buffer_head *alloc_tree_block(struct super_block *sb)
|
||||
static struct scoutfs_block *alloc_tree_block(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
|
||||
bh = scoutfs_block_dirty_alloc(sb);
|
||||
if (!IS_ERR(bh)) {
|
||||
bt = bh_data(bh);
|
||||
bl = scoutfs_block_dirty_alloc(sb);
|
||||
if (!IS_ERR(bl)) {
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
bt->free_end = cpu_to_le16(SCOUTFS_BLOCK_SIZE);
|
||||
bt->free_reclaim = 0;
|
||||
bt->nr_items = 0;
|
||||
}
|
||||
|
||||
return check_bh_alignment(bh);
|
||||
return check_bl_alignment(bl);
|
||||
}
|
||||
|
||||
/* the caller has ensured that the free must succeed */
|
||||
@@ -561,41 +557,41 @@ static void free_tree_block(struct super_block *sb, __le64 blkno)
|
||||
* Allocate a new tree block and point the root at it. The caller
|
||||
* is responsible for the items in the new root block.
|
||||
*/
|
||||
static struct buffer_head *grow_tree(struct super_block *sb,
|
||||
static struct scoutfs_block *grow_tree(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root)
|
||||
{
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
|
||||
bh = alloc_tree_block(sb);
|
||||
if (!IS_ERR(bh)) {
|
||||
hdr = bh_data(bh);
|
||||
bl = alloc_tree_block(sb);
|
||||
if (!IS_ERR(bl)) {
|
||||
hdr = scoutfs_block_data(bl);
|
||||
|
||||
root->height++;
|
||||
root->ref.blkno = hdr->blkno;
|
||||
root->ref.seq = hdr->seq;
|
||||
|
||||
set_block_lock_class(bh, root->height - 1);
|
||||
set_block_lock_class(bl, root->height - 1);
|
||||
}
|
||||
|
||||
return bh;
|
||||
return bl;
|
||||
}
|
||||
|
||||
static struct buffer_head *get_block_ref(struct super_block *sb, int level,
|
||||
static struct scoutfs_block *get_block_ref(struct super_block *sb, int level,
|
||||
struct scoutfs_block_ref *ref,
|
||||
bool dirty)
|
||||
{
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
|
||||
if (dirty)
|
||||
bh = scoutfs_block_dirty_ref(sb, ref);
|
||||
bl = scoutfs_block_dirty_ref(sb, ref);
|
||||
else
|
||||
bh = scoutfs_block_read_ref(sb, ref);
|
||||
bl = scoutfs_block_read_ref(sb, ref);
|
||||
|
||||
if (!IS_ERR(bh))
|
||||
set_block_lock_class(bh, level);
|
||||
if (!IS_ERR(bl))
|
||||
set_block_lock_class(bl, level);
|
||||
|
||||
return check_bh_alignment(bh);
|
||||
return check_bl_alignment(bl);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -636,18 +632,18 @@ static void create_parent_item(struct scoutfs_btree_block *parent,
|
||||
* them locked. We only need to lock the new sibling if we return it
|
||||
* instead of our given block for the caller to continue descent.
|
||||
*/
|
||||
static struct buffer_head *try_split(struct super_block *sb,
|
||||
static struct scoutfs_block *try_split(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
int level, struct scoutfs_key *key,
|
||||
unsigned int val_len,
|
||||
struct scoutfs_btree_block *parent,
|
||||
unsigned int parent_pos,
|
||||
struct buffer_head *right_bh)
|
||||
struct scoutfs_block *right_bl)
|
||||
{
|
||||
struct scoutfs_btree_block *right = bh_data(right_bh);
|
||||
struct scoutfs_btree_block *right = scoutfs_block_data(right_bl);
|
||||
struct scoutfs_btree_block *left;
|
||||
struct buffer_head *left_bh;
|
||||
struct buffer_head *par_bh = NULL;
|
||||
struct scoutfs_block *left_bl;
|
||||
struct scoutfs_block *par_bl = NULL;
|
||||
struct scoutfs_key maximal;
|
||||
unsigned int all_bytes;
|
||||
|
||||
@@ -656,33 +652,33 @@ static struct buffer_head *try_split(struct super_block *sb,
|
||||
all_bytes = all_val_bytes(val_len);
|
||||
|
||||
if (contig_free(right) >= all_bytes)
|
||||
return right_bh;
|
||||
return right_bl;
|
||||
|
||||
if (reclaimable_free(right) >= all_bytes) {
|
||||
compact_items(right);
|
||||
return right_bh;
|
||||
return right_bl;
|
||||
}
|
||||
|
||||
/* alloc split neighbour first to avoid unwinding tree growth */
|
||||
left_bh = alloc_tree_block(sb);
|
||||
if (IS_ERR(left_bh)) {
|
||||
unlock_tree_block(sb, root, right_bh, true);
|
||||
scoutfs_block_put(right_bh);
|
||||
return left_bh;
|
||||
left_bl = alloc_tree_block(sb);
|
||||
if (IS_ERR(left_bl)) {
|
||||
unlock_tree_block(sb, root, right_bl, true);
|
||||
scoutfs_block_put(right_bl);
|
||||
return left_bl;
|
||||
}
|
||||
left = bh_data(left_bh);
|
||||
left = scoutfs_block_data(left_bl);
|
||||
|
||||
if (!parent) {
|
||||
par_bh = grow_tree(sb, root);
|
||||
if (IS_ERR(par_bh)) {
|
||||
par_bl = grow_tree(sb, root);
|
||||
if (IS_ERR(par_bl)) {
|
||||
free_tree_block(sb, left->hdr.blkno);
|
||||
scoutfs_block_put(left_bh);
|
||||
unlock_tree_block(sb, root, right_bh, true);
|
||||
scoutfs_block_put(right_bh);
|
||||
return par_bh;
|
||||
scoutfs_block_put(left_bl);
|
||||
unlock_tree_block(sb, root, right_bl, true);
|
||||
scoutfs_block_put(right_bl);
|
||||
return par_bl;
|
||||
}
|
||||
|
||||
parent = bh_data(par_bh);
|
||||
parent = scoutfs_block_data(par_bl);
|
||||
parent_pos = 0;
|
||||
|
||||
scoutfs_set_max_key(&maximal);
|
||||
@@ -695,19 +691,19 @@ static struct buffer_head *try_split(struct super_block *sb,
|
||||
|
||||
if (scoutfs_key_cmp(key, greatest_key(left)) <= 0) {
|
||||
/* insertion will go to the new left block */
|
||||
unlock_tree_block(sb, root, right_bh, true);
|
||||
lock_tree_block(sb, root, left_bh, true, 0);
|
||||
swap(right_bh, left_bh);
|
||||
unlock_tree_block(sb, root, right_bl, true);
|
||||
lock_tree_block(sb, root, left_bl, true, 0);
|
||||
swap(right_bl, left_bl);
|
||||
} else {
|
||||
/* insertion will still go through us, might need to compact */
|
||||
if (contig_free(right) < all_bytes)
|
||||
compact_items(right);
|
||||
}
|
||||
|
||||
scoutfs_block_put(par_bh);
|
||||
scoutfs_block_put(left_bh);
|
||||
scoutfs_block_put(par_bl);
|
||||
scoutfs_block_put(left_bl);
|
||||
|
||||
return right_bh;
|
||||
return right_bl;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -733,22 +729,22 @@ static struct buffer_head *try_split(struct super_block *sb,
|
||||
*
|
||||
* XXX this could more cleverly chose a merge candidate sibling
|
||||
*/
|
||||
static struct buffer_head *try_merge(struct super_block *sb,
|
||||
static struct scoutfs_block *try_merge(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_btree_block *parent,
|
||||
int level, unsigned int pos,
|
||||
struct buffer_head *bh)
|
||||
struct scoutfs_block *bl)
|
||||
{
|
||||
struct scoutfs_btree_block *bt = bh_data(bh);
|
||||
struct scoutfs_btree_block *bt = scoutfs_block_data(bl);
|
||||
struct scoutfs_btree_item *sib_item;
|
||||
struct scoutfs_btree_block *sib_bt;
|
||||
struct buffer_head *sib_bh;
|
||||
struct scoutfs_block *sib_bl;
|
||||
unsigned int sib_pos;
|
||||
bool move_right;
|
||||
int to_move;
|
||||
|
||||
if (reclaimable_free(bt) <= SCOUTFS_BTREE_FREE_LIMIT)
|
||||
return bh;
|
||||
return bl;
|
||||
|
||||
/* move items right into our block if we have a left sibling */
|
||||
if (pos) {
|
||||
@@ -760,26 +756,26 @@ static struct buffer_head *try_merge(struct super_block *sb,
|
||||
}
|
||||
sib_item = pos_item(parent, sib_pos);
|
||||
|
||||
sib_bh = get_block_ref(sb, level, (void *)sib_item->val, true);
|
||||
if (IS_ERR(sib_bh)) {
|
||||
sib_bl = get_block_ref(sb, level, (void *)sib_item->val, true);
|
||||
if (IS_ERR(sib_bl)) {
|
||||
/* XXX do we need to unlock this? don't think so */
|
||||
scoutfs_block_put(bh);
|
||||
return sib_bh;
|
||||
scoutfs_block_put(bl);
|
||||
return sib_bl;
|
||||
}
|
||||
sib_bt = bh_data(sib_bh);
|
||||
sib_bt = scoutfs_block_data(sib_bl);
|
||||
|
||||
if (!move_right) {
|
||||
unlock_tree_block(sb, root, bh, true);
|
||||
lock_tree_block(sb, root, sib_bh, true, 0);
|
||||
lock_tree_block(sb, root, bh, true, 1);
|
||||
unlock_tree_block(sb, root, bl, true);
|
||||
lock_tree_block(sb, root, sib_bl, true, 0);
|
||||
lock_tree_block(sb, root, bl, true, 1);
|
||||
|
||||
if (reclaimable_free(bt) <= SCOUTFS_BTREE_FREE_LIMIT) {
|
||||
unlock_tree_block(sb, root, sib_bh, true);
|
||||
scoutfs_block_put(sib_bh);
|
||||
return bh;
|
||||
unlock_tree_block(sb, root, sib_bl, true);
|
||||
scoutfs_block_put(sib_bl);
|
||||
return bl;
|
||||
}
|
||||
} else {
|
||||
lock_tree_block(sb, root, sib_bh, true, 1);
|
||||
lock_tree_block(sb, root, sib_bl, true, 1);
|
||||
}
|
||||
|
||||
if (used_total(sib_bt) <= reclaimable_free(bt))
|
||||
@@ -822,10 +818,10 @@ static struct buffer_head *try_merge(struct super_block *sb,
|
||||
free_tree_block(sb, parent->hdr.blkno);
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, sib_bh, true);
|
||||
scoutfs_block_put(sib_bh);
|
||||
unlock_tree_block(sb, root, sib_bl, true);
|
||||
scoutfs_block_put(sib_bl);
|
||||
|
||||
return bh;
|
||||
return bl;
|
||||
}
|
||||
|
||||
enum {
|
||||
@@ -992,7 +988,7 @@ out:
|
||||
* keep searching sibling blocks if their search key falls at the end of
|
||||
* a leaf in their search direction.
|
||||
*/
|
||||
static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
static struct scoutfs_block *btree_walk(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *prev_key,
|
||||
@@ -1000,8 +996,8 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
unsigned int val_len, u64 seq, int op)
|
||||
{
|
||||
struct scoutfs_btree_block *parent = NULL;
|
||||
struct buffer_head *par_bh = NULL;
|
||||
struct buffer_head *bh = NULL;
|
||||
struct scoutfs_block *par_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct scoutfs_btree_item *item = NULL;
|
||||
struct scoutfs_block_ref *ref;
|
||||
struct scoutfs_key small;
|
||||
@@ -1025,11 +1021,11 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
|
||||
if (!root->height) {
|
||||
if (op == WALK_INSERT) {
|
||||
bh = ERR_PTR(-ENOENT);
|
||||
bl = ERR_PTR(-ENOENT);
|
||||
} else {
|
||||
bh = grow_tree(sb, root);
|
||||
if (!IS_ERR(bh)) {
|
||||
lock_tree_block(sb, root, bh, dirty, 0);
|
||||
bl = grow_tree(sb, root);
|
||||
if (!IS_ERR(bl)) {
|
||||
lock_tree_block(sb, root, bl, dirty, 0);
|
||||
unlock_tree_block(sb, root, NULL, dirty);
|
||||
}
|
||||
}
|
||||
@@ -1039,7 +1035,7 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
|
||||
/* skip the whole tree if the root ref's seq is old */
|
||||
if (op == WALK_NEXT_SEQ && le64_to_cpu(ref->seq) < seq) {
|
||||
bh = ERR_PTR(-ENOENT);
|
||||
bl = ERR_PTR(-ENOENT);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1048,36 +1044,37 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
|
||||
while (level--) {
|
||||
/* XXX hmm, need to think about retry */
|
||||
bh = get_block_ref(sb, level, ref, dirty);
|
||||
if (IS_ERR(bh))
|
||||
bl = get_block_ref(sb, level, ref, dirty);
|
||||
if (IS_ERR(bl))
|
||||
break;
|
||||
|
||||
/* XXX enable this */
|
||||
ret = 0 && verify_btree_block(bh_data(bh), level, &small, &large);
|
||||
ret = 0 && verify_btree_block(scoutfs_block_data(bl), level,
|
||||
&small, &large);
|
||||
if (ret) {
|
||||
scoutfs_block_put(bh);
|
||||
bh = ERR_PTR(ret);
|
||||
scoutfs_block_put(bl);
|
||||
bl = ERR_PTR(ret);
|
||||
break;
|
||||
}
|
||||
|
||||
lock_tree_block(sb, root, bh, dirty, 0);
|
||||
lock_tree_block(sb, root, bl, dirty, 0);
|
||||
|
||||
if (op == WALK_INSERT)
|
||||
bh = try_split(sb, root, level, key, val_len, parent,
|
||||
pos, bh);
|
||||
bl = try_split(sb, root, level, key, val_len, parent,
|
||||
pos, bl);
|
||||
if ((op == WALK_DELETE) && parent)
|
||||
bh = try_merge(sb, root, parent, level, pos, bh);
|
||||
if (IS_ERR(bh))
|
||||
bl = try_merge(sb, root, parent, level, pos, bl);
|
||||
if (IS_ERR(bl))
|
||||
break;
|
||||
|
||||
unlock_tree_block(sb, root, par_bh, dirty);
|
||||
unlock_tree_block(sb, root, par_bl, dirty);
|
||||
|
||||
if (!level)
|
||||
break;
|
||||
|
||||
scoutfs_block_put(par_bh);
|
||||
par_bh = bh;
|
||||
parent = bh_data(par_bh);
|
||||
scoutfs_block_put(par_bl);
|
||||
par_bl = bl;
|
||||
parent = scoutfs_block_data(par_bl);
|
||||
|
||||
/*
|
||||
* Find the parent item that references the next child
|
||||
@@ -1089,9 +1086,9 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
if (pos >= parent->nr_items) {
|
||||
/* current block dropped as parent below */
|
||||
if (op == WALK_NEXT_SEQ)
|
||||
bh = ERR_PTR(-ENOENT);
|
||||
bl = ERR_PTR(-ENOENT);
|
||||
else
|
||||
bh = ERR_PTR(-EIO);
|
||||
bl = ERR_PTR(-EIO);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1119,11 +1116,11 @@ static struct buffer_head *btree_walk(struct super_block *sb,
|
||||
}
|
||||
|
||||
out:
|
||||
if (IS_ERR(bh))
|
||||
unlock_tree_block(sb, root, par_bh, dirty);
|
||||
scoutfs_block_put(par_bh);
|
||||
if (IS_ERR(bl))
|
||||
unlock_tree_block(sb, root, par_bl, dirty);
|
||||
scoutfs_block_put(par_bl);
|
||||
|
||||
return bh;
|
||||
return bl;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1138,17 +1135,17 @@ int scoutfs_btree_lookup(struct super_block *sb,
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
unsigned int pos;
|
||||
int cmp;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_btree_lookup(sb, key, scoutfs_btree_val_length(val));
|
||||
|
||||
bh = btree_walk(sb, root, key, NULL, NULL, 0, 0, 0);
|
||||
if (IS_ERR(bh))
|
||||
return PTR_ERR(bh);
|
||||
bt = bh_data(bh);
|
||||
bl = btree_walk(sb, root, key, NULL, NULL, 0, 0, 0);
|
||||
if (IS_ERR(bl))
|
||||
return PTR_ERR(bl);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
pos = find_pos(bt, key, &cmp);
|
||||
if (cmp == 0) {
|
||||
@@ -1158,8 +1155,8 @@ int scoutfs_btree_lookup(struct super_block *sb,
|
||||
ret = -ENOENT;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, false);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, false);
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
trace_printk("key "CKF" ret %d\n", CKA(key), ret);
|
||||
|
||||
@@ -1182,7 +1179,7 @@ int scoutfs_btree_insert(struct super_block *sb,
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
unsigned int val_len;
|
||||
int pos;
|
||||
int cmp;
|
||||
@@ -1198,10 +1195,10 @@ int scoutfs_btree_insert(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(val_len > SCOUTFS_MAX_ITEM_LEN))
|
||||
return -EINVAL;
|
||||
|
||||
bh = btree_walk(sb, root, key, NULL, NULL, val_len, 0, WALK_INSERT);
|
||||
if (IS_ERR(bh))
|
||||
return PTR_ERR(bh);
|
||||
bt = bh_data(bh);
|
||||
bl = btree_walk(sb, root, key, NULL, NULL, val_len, 0, WALK_INSERT);
|
||||
if (IS_ERR(bl))
|
||||
return PTR_ERR(bl);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
pos = find_pos(bt, key, &cmp);
|
||||
if (cmp) {
|
||||
@@ -1214,8 +1211,8 @@ int scoutfs_btree_insert(struct super_block *sb,
|
||||
ret = -EEXIST;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, true);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, true);
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1229,19 +1226,19 @@ int scoutfs_btree_delete(struct super_block *sb,
|
||||
struct scoutfs_key *key)
|
||||
{
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
int pos;
|
||||
int cmp;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_btree_delete(sb, key, 0);
|
||||
|
||||
bh = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DELETE);
|
||||
if (IS_ERR(bh)) {
|
||||
ret = PTR_ERR(bh);
|
||||
bl = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DELETE);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
goto out;
|
||||
}
|
||||
bt = bh_data(bh);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
pos = find_pos(bt, key, &cmp);
|
||||
if (cmp == 0) {
|
||||
@@ -1262,8 +1259,8 @@ int scoutfs_btree_delete(struct super_block *sb,
|
||||
ret = -ENOENT;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, true);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, true);
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
out:
|
||||
trace_printk("key "CKF" ret %d\n", CKA(key), ret);
|
||||
@@ -1301,7 +1298,7 @@ static int btree_next(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key start = *first;
|
||||
struct scoutfs_key key = *first;
|
||||
struct scoutfs_key next_key;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
int pos;
|
||||
int ret;
|
||||
|
||||
@@ -1312,26 +1309,26 @@ static int btree_next(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
ret = -ENOENT;
|
||||
while (scoutfs_key_cmp(&key, last) <= 0) {
|
||||
|
||||
bh = btree_walk(sb, root, &key, NULL, &next_key, 0, seq, op);
|
||||
bl = btree_walk(sb, root, &key, NULL, &next_key, 0, seq, op);
|
||||
|
||||
/* next seq walks can terminate in parents with old seqs */
|
||||
if (op == WALK_NEXT_SEQ && bh == ERR_PTR(-ENOENT)) {
|
||||
if (op == WALK_NEXT_SEQ && bl == ERR_PTR(-ENOENT)) {
|
||||
key = next_key;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (IS_ERR(bh)) {
|
||||
ret = PTR_ERR(bh);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
break;
|
||||
}
|
||||
bt = bh_data(bh);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
/* keep trying leaves until next_key passes last */
|
||||
pos = find_pos_after_seq(bt, &key, 0, seq, op);
|
||||
if (pos >= bt->nr_items) {
|
||||
key = next_key;
|
||||
unlock_tree_block(sb, root, bh, false);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, false);
|
||||
scoutfs_block_put(bl);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1348,8 +1345,8 @@ static int btree_next(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
ret = -ENOENT;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, false);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, false);
|
||||
scoutfs_block_put(bl);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1400,7 +1397,7 @@ int scoutfs_btree_prev(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct scoutfs_key key = *last;
|
||||
struct scoutfs_key prev_key;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
int pos;
|
||||
int cmp;
|
||||
int ret;
|
||||
@@ -1411,19 +1408,19 @@ int scoutfs_btree_prev(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
ret = -ENOENT;
|
||||
while (scoutfs_key_cmp(&key, first) >= 0) {
|
||||
|
||||
bh = btree_walk(sb, root, &key, NULL, &prev_key, 0, 0, 0);
|
||||
if (IS_ERR(bh)) {
|
||||
ret = PTR_ERR(bh);
|
||||
bl = btree_walk(sb, root, &key, NULL, &prev_key, 0, 0, 0);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
break;
|
||||
}
|
||||
bt = bh_data(bh);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
pos = find_pos(bt, &key, &cmp);
|
||||
|
||||
/* walk to the prev leaf if we hit the front of this leaf */
|
||||
if (pos == 0 && cmp != 0) {
|
||||
unlock_tree_block(sb, root, bh, false);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, false);
|
||||
scoutfs_block_put(bl);
|
||||
if (scoutfs_key_is_zero(&key))
|
||||
break;
|
||||
key = prev_key;
|
||||
@@ -1444,8 +1441,8 @@ int scoutfs_btree_prev(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, false);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, false);
|
||||
scoutfs_block_put(bl);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1464,16 +1461,16 @@ int scoutfs_btree_dirty(struct super_block *sb,
|
||||
struct scoutfs_key *key)
|
||||
{
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
int cmp;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_btree_dirty(sb, key, 0);
|
||||
|
||||
bh = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DIRTY);
|
||||
if (IS_ERR(bh))
|
||||
return PTR_ERR(bh);
|
||||
bt = bh_data(bh);
|
||||
bl = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DIRTY);
|
||||
if (IS_ERR(bl))
|
||||
return PTR_ERR(bl);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
find_pos(bt, key, &cmp);
|
||||
if (cmp == 0) {
|
||||
@@ -1482,8 +1479,8 @@ int scoutfs_btree_dirty(struct super_block *sb,
|
||||
ret = -ENOENT;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, true);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, true);
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
trace_printk("key "CKF" ret %d\n", CKA(key), ret);
|
||||
|
||||
@@ -1504,7 +1501,7 @@ int scoutfs_btree_update(struct super_block *sb,
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
int pos;
|
||||
int cmp;
|
||||
int ret;
|
||||
@@ -1512,10 +1509,10 @@ int scoutfs_btree_update(struct super_block *sb,
|
||||
trace_scoutfs_btree_update(sb, key,
|
||||
val ? scoutfs_btree_val_length(val) : 0);
|
||||
|
||||
bh = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DIRTY);
|
||||
if (IS_ERR(bh))
|
||||
return PTR_ERR(bh);
|
||||
bt = bh_data(bh);
|
||||
bl = btree_walk(sb, root, key, NULL, NULL, 0, 0, WALK_DIRTY);
|
||||
if (IS_ERR(bl))
|
||||
return PTR_ERR(bl);
|
||||
bt = scoutfs_block_data(bl);
|
||||
|
||||
pos = find_pos(bt, key, &cmp);
|
||||
if (cmp == 0) {
|
||||
@@ -1527,8 +1524,8 @@ int scoutfs_btree_update(struct super_block *sb,
|
||||
ret = -ENOENT;
|
||||
}
|
||||
|
||||
unlock_tree_block(sb, root, bh, true);
|
||||
scoutfs_block_put(bh);
|
||||
unlock_tree_block(sb, root, bl, true);
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -105,7 +105,7 @@ struct buddy_info {
|
||||
u64 level_div[SCOUTFS_BUDDY_MAX_HEIGHT];
|
||||
|
||||
struct buddy_stack {
|
||||
struct buffer_head *bh[SCOUTFS_BUDDY_MAX_HEIGHT];
|
||||
struct scoutfs_block *bl[SCOUTFS_BUDDY_MAX_HEIGHT];
|
||||
u16 sl[SCOUTFS_BUDDY_MAX_HEIGHT];
|
||||
int nr;
|
||||
} stack;
|
||||
@@ -169,26 +169,27 @@ static int order_nr(int order, int nr)
|
||||
return order_off(order) + nr;
|
||||
}
|
||||
|
||||
static void stack_push(struct buddy_stack *sta, struct buffer_head *bh, u16 sl)
|
||||
static void stack_push(struct buddy_stack *sta, struct scoutfs_block *bl,
|
||||
u16 sl)
|
||||
{
|
||||
sta->bh[sta->nr] = bh;
|
||||
sta->bl[sta->nr] = bl;
|
||||
sta->sl[sta->nr++] = sl;
|
||||
}
|
||||
|
||||
/* sl isn't returned because callers peek the leaf where sl is meaningless */
|
||||
static struct buffer_head *stack_peek(struct buddy_stack *sta)
|
||||
static struct scoutfs_block *stack_peek(struct buddy_stack *sta)
|
||||
{
|
||||
if (sta->nr)
|
||||
return sta->bh[sta->nr - 1];
|
||||
return sta->bl[sta->nr - 1];
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct buffer_head *stack_pop(struct buddy_stack *sta, u16 *sl)
|
||||
static struct scoutfs_block *stack_pop(struct buddy_stack *sta, u16 *sl)
|
||||
{
|
||||
if (sta->nr) {
|
||||
*sl = sta->sl[--sta->nr];
|
||||
return sta->bh[sta->nr];
|
||||
return sta->bl[sta->nr];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@@ -287,16 +288,16 @@ static void stack_cleanup(struct super_block *sb)
|
||||
struct buddy_stack *sta = &binf->stack;
|
||||
struct scoutfs_buddy_root *root = &sbi->super.buddy_root;
|
||||
struct scoutfs_buddy_block *bud;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
u16 free_orders = 0;
|
||||
bool parent;
|
||||
u16 sl;
|
||||
int i;
|
||||
|
||||
parent = false;
|
||||
while ((bh = stack_pop(sta, &sl))) {
|
||||
while ((bl = stack_pop(sta, &sl))) {
|
||||
|
||||
bud = bh_data(bh);
|
||||
bud = scoutfs_block_data(bl);
|
||||
if (parent && !set_slot_free_orders(bud, sl, free_orders))
|
||||
break;
|
||||
|
||||
@@ -306,17 +307,17 @@ static void stack_cleanup(struct super_block *sb)
|
||||
free_orders |= 1 << i;
|
||||
}
|
||||
|
||||
scoutfs_block_put(bh);
|
||||
scoutfs_block_put(bl);
|
||||
parent = true;
|
||||
}
|
||||
|
||||
/* set root if we got that far */
|
||||
if (bh == NULL)
|
||||
if (bl == NULL)
|
||||
root->slot.free_orders = cpu_to_le16(free_orders);
|
||||
|
||||
/* put any remaining blocks */
|
||||
while ((bh = stack_pop(sta, &sl)))
|
||||
scoutfs_block_put(bh);
|
||||
while ((bl = stack_pop(sta, &sl)))
|
||||
scoutfs_block_put(bl);
|
||||
|
||||
}
|
||||
|
||||
@@ -344,14 +345,14 @@ static void clear_buddy_bit(struct scoutfs_buddy_block *bud, int order, int nr)
|
||||
*/
|
||||
static void init_buddy_block(struct buddy_info *binf,
|
||||
struct scoutfs_super_block *super,
|
||||
struct buffer_head *bh, int level)
|
||||
struct scoutfs_block *bl, int level)
|
||||
{
|
||||
struct scoutfs_buddy_block *bud = bh_data(bh);
|
||||
struct scoutfs_buddy_block *bud = scoutfs_block_data(bl);
|
||||
u16 count;
|
||||
int nr;
|
||||
int i;
|
||||
|
||||
scoutfs_block_zero(bh, sizeof(bud->hdr));
|
||||
scoutfs_block_zero(bl, sizeof(bud->hdr));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(bud->first_set); i++)
|
||||
bud->first_set[i] = cpu_to_le16(U16_MAX);
|
||||
@@ -387,36 +388,34 @@ static void init_buddy_block(struct buddy_info *binf,
|
||||
* construct a fake ref so we can re-use the block ref cow code. When
|
||||
* we initialize the first use of a block we use the first of the pair.
|
||||
*/
|
||||
static struct buffer_head *get_buddy_block(struct super_block *sb,
|
||||
struct scoutfs_buddy_slot *slot,
|
||||
u64 blkno, int level)
|
||||
static struct scoutfs_block *get_buddy_block(struct super_block *sb,
|
||||
struct scoutfs_buddy_slot *slot,
|
||||
u64 blkno, int level)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
struct buddy_info *binf = sbi->buddy_info;
|
||||
struct scoutfs_buddy_block *bud;
|
||||
struct scoutfs_block_ref ref;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
|
||||
trace_printk("getting block level %d blkno %llu slot seq %llu off %u\n",
|
||||
level, blkno, le64_to_cpu(slot->seq), slot->blkno_off);
|
||||
|
||||
/* init a new block for an unused slot */
|
||||
if (slot->seq == 0) {
|
||||
bh = scoutfs_block_dirty(sb, blkno);
|
||||
if (!IS_ERR(bh))
|
||||
init_buddy_block(binf, super, bh, level);
|
||||
bl = scoutfs_block_dirty(sb, blkno);
|
||||
if (!IS_ERR(bl))
|
||||
init_buddy_block(binf, super, bl, level);
|
||||
} else {
|
||||
/* construct block ref from tree walk blkno and slot ref */
|
||||
ref.blkno = cpu_to_le64(blkno + slot->blkno_off);
|
||||
ref.seq = slot->seq;
|
||||
bh = scoutfs_block_dirty_ref(sb, &ref);
|
||||
bl = scoutfs_block_dirty_ref(sb, &ref);
|
||||
}
|
||||
|
||||
if (!IS_ERR(bh)) {
|
||||
bud = bh_data(bh);
|
||||
|
||||
trace_printk("got blkno %llu\n", (u64)bh->b_blocknr);
|
||||
if (!IS_ERR(bl)) {
|
||||
bud = scoutfs_block_data(bl);
|
||||
|
||||
/* rebuild slot ref to blkno */
|
||||
if (slot->seq != bud->hdr.seq) {
|
||||
@@ -427,7 +426,7 @@ static struct buffer_head *get_buddy_block(struct super_block *sb,
|
||||
}
|
||||
}
|
||||
|
||||
return bh;
|
||||
return bl;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -457,7 +456,7 @@ static int buddy_walk(struct super_block *sb, u64 blk, int order, u64 *base)
|
||||
struct scoutfs_buddy_root *root = &sbi->super.buddy_root;
|
||||
struct scoutfs_buddy_block *bud;
|
||||
struct scoutfs_buddy_slot *slot;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
u64 blkno;
|
||||
int level;
|
||||
int ret = 0;
|
||||
@@ -475,16 +474,16 @@ static int buddy_walk(struct super_block *sb, u64 blk, int order, u64 *base)
|
||||
|
||||
while (level--) {
|
||||
/* XXX do base and level make sense here? */
|
||||
bh = get_buddy_block(sb, slot, blkno, level);
|
||||
if (IS_ERR(bh)) {
|
||||
ret = PTR_ERR(bh);
|
||||
bl = get_buddy_block(sb, slot, blkno, level);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
break;
|
||||
}
|
||||
|
||||
trace_printk("before blk %llu order %d level %d blkno %llu base %llu sl %d\n",
|
||||
blk, order, level, blkno, *base, sl);
|
||||
|
||||
bud = bh_data(bh);
|
||||
bud = scoutfs_block_data(bl);
|
||||
|
||||
if (level) {
|
||||
if (order >= 0) {
|
||||
@@ -516,7 +515,7 @@ static int buddy_walk(struct super_block *sb, u64 blk, int order, u64 *base)
|
||||
blk, order, level, blkno, *base, sl);
|
||||
|
||||
|
||||
stack_push(sta, bh, sl);
|
||||
stack_push(sta, bl, sl);
|
||||
}
|
||||
|
||||
trace_printk("walking ret %d\n", ret);
|
||||
@@ -554,7 +553,7 @@ static int buddy_alloc(struct super_block *sb, u64 *blk, int order, int found)
|
||||
struct buddy_info *binf = sbi->buddy_info;
|
||||
struct buddy_stack *sta = &binf->stack;
|
||||
struct scoutfs_buddy_block *bud;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
u64 base;
|
||||
int ret;
|
||||
int nr;
|
||||
@@ -569,8 +568,8 @@ static int buddy_alloc(struct super_block *sb, u64 *blk, int order, int found)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
bh = stack_peek(sta);
|
||||
bud = bh_data(bh);
|
||||
bl = stack_peek(sta);
|
||||
bud = scoutfs_block_data(bl);
|
||||
|
||||
if (found >= 0) {
|
||||
nr = le16_to_cpu(bud->first_set[found]);
|
||||
@@ -624,7 +623,7 @@ static int buddy_free(struct super_block *sb, u64 blk, int order)
|
||||
struct buddy_info *binf = sbi->buddy_info;
|
||||
struct buddy_stack *sta = &binf->stack;
|
||||
struct scoutfs_buddy_block *bud;
|
||||
struct buffer_head *bh;
|
||||
struct scoutfs_block *bl;
|
||||
u64 unused;
|
||||
int ret;
|
||||
int nr;
|
||||
@@ -634,8 +633,8 @@ static int buddy_free(struct super_block *sb, u64 blk, int order)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
bh = stack_peek(sta);
|
||||
bud = bh_data(bh);
|
||||
bl = stack_peek(sta);
|
||||
bud = scoutfs_block_data(bl);
|
||||
|
||||
nr = buddy_bit(blk) >> order;
|
||||
for (i = order; i < SCOUTFS_BUDDY_ORDERS - 2; i++) {
|
||||
|
||||
@@ -108,23 +108,25 @@ int scoutfs_write_dirty_super(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super;
struct scoutfs_block *bl;
struct buffer_head *bh;
int ret;

/* XXX prealloc? */
/* XXX hack is immediately repaired in the coming patches */
bh = sb_getblk(sb, le64_to_cpu(sbi->super.hdr.blkno));
if (!bh)
return -ENOMEM;
super = bh_data(bh);
bl = (void *)bh;
super = scoutfs_block_data(bl);

*super = sbi->super;
scoutfs_block_zero(bh, sizeof(struct scoutfs_super_block));
scoutfs_block_set_crc(bh);
scoutfs_block_zero(bl, sizeof(struct scoutfs_super_block));
scoutfs_block_set_crc(bl);

mark_buffer_dirty(bh);
ret = sync_dirty_buffer(bh);

scoutfs_block_put(bh);
scoutfs_block_put(bl);
return ret;
}

@@ -132,18 +134,18 @@ static int read_supers(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super;
struct buffer_head *bh = NULL;
struct scoutfs_block *bl = NULL;
int found = -1;
int i;

for (i = 0; i < SCOUTFS_SUPER_NR; i++) {
scoutfs_block_put(bh);
bh = scoutfs_block_read(sb, SCOUTFS_SUPER_BLKNO + i);
if (IS_ERR(bh)) {
scoutfs_block_put(bl);
bl = scoutfs_block_read(sb, SCOUTFS_SUPER_BLKNO + i);
if (IS_ERR(bl)) {
scoutfs_warn(sb, "couldn't read super block %u", i);
continue;
}
super = bh_data(bh);
super = scoutfs_block_data(bl);

if (super->id != cpu_to_le64(SCOUTFS_SUPER_ID)) {
scoutfs_warn(sb, "super block %u has invalid id %llx",
@@ -158,7 +160,7 @@ static int read_supers(struct super_block *sb)
}
}

scoutfs_block_put(bh);
scoutfs_block_put(bl);

if (found < 0) {
scoutfs_err(sb, "unable to read valid super block");