Add _block_dirty_ref to dirty blocks in one place

To create dirty blocks in memory, each block type's callers currently
get a reference to a created block and then dirty it.  The reference
they get could be an existing cached block that stale readers are
currently using.  This is a problem for our block consistency
protocol: writers can dirty and modify cached blocks that readers are
currently reading in memory, leading to read corruption.

This commit is the first step in addressing that problem.  We add a
scoutfs_block_dirty_ref() call that returns a reference to a dirtied
block from the block core in one call.  We're only changing the
callers in this patch; we'll rework the dirtying mechanism in an
upcoming patch to avoid corrupting readers.

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown
2021-02-21 11:26:11 -08:00
parent f18fa0e97a
commit 6237f0adc5
7 changed files with 187 additions and 348 deletions
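
As a rough sketch of what the conversion means for a caller (the
wrapper function here is hypothetical; the scoutfs_block_dirty_ref()
signature and the put call are taken from the diff below):

	/* Hypothetical caller: the old open-coded pattern vs. the new call. */
	static int dirty_example(struct super_block *sb, struct scoutfs_alloc *alloc,
				 struct scoutfs_block_writer *wri,
				 struct scoutfs_block_ref *ref)
	{
		struct scoutfs_block *bl = NULL;
		int ret;

		/*
		 * Before: read the ref, test writer dirtiness, allocate a
		 * new blkno, create the block, copy or zero its contents,
		 * fill the header, mark it dirty, and update the ref --
		 * open-coded in every block type.
		 *
		 * After: one call returns a reference to a dirtied block.
		 * A dirty_blkno of 0 lets it allocate the blkno and a NULL
		 * ref_blkno lets it free any blkno it replaces.
		 */
		ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref,
					      SCOUTFS_BLOCK_MAGIC_BTREE, &bl,
					      0, NULL);
		if (ret == 0)
			scoutfs_block_put(sb, bl);
		return ret;
	}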

src/alloc.c

@@ -393,82 +393,8 @@ static int dirty_list_block(struct super_block *sb,
u64 dirty, u64 *old,
struct scoutfs_block **bl_ret)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_block *cow_bl = NULL;
struct scoutfs_block *bl = NULL;
struct scoutfs_alloc_list_block *lblk;
bool undo_alloc = false;
u64 blkno;
int ret;
int err;
blkno = le64_to_cpu(ref->blkno);
if (blkno) {
ret = read_list_block(sb, ref, &bl);
if (ret < 0)
goto out;
if (scoutfs_block_writer_is_dirty(sb, bl)) {
ret = 0;
goto out;
}
}
if (dirty == 0) {
ret = scoutfs_alloc_meta(sb, alloc, wri, &dirty);
if (ret < 0)
goto out;
undo_alloc = true;
}
cow_bl = scoutfs_block_create(sb, dirty);
if (IS_ERR(cow_bl)) {
ret = PTR_ERR(cow_bl);
goto out;
}
if (old) {
*old = blkno;
} else if (blkno) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
if (ret < 0)
goto out;
}
if (bl)
memcpy(cow_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
else
memset(cow_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
bl = cow_bl;
cow_bl = NULL;
lblk = bl->data;
lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
lblk->hdr.fsid = super->hdr.fsid;
lblk->hdr.blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&lblk->hdr.seq, sizeof(lblk->hdr.seq));
ref->blkno = lblk->hdr.blkno;
ref->seq = lblk->hdr.seq;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
ret = 0;
out:
scoutfs_block_put(sb, cow_bl);
if (ret < 0 && undo_alloc) {
err = scoutfs_free_meta(sb, alloc, wri, dirty);
BUG_ON(err); /* inconsistent */
}
if (ret < 0) {
scoutfs_block_put(sb, bl);
bl = NULL;
}
*bl_ret = bl;
return ret;
return scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST,
bl_ret, dirty, old);
}
/* Allocate a new dirty list block if we fill up more than 3/4 of the block. */

src/block.c

@@ -28,6 +28,7 @@
#include "counters.h"
#include "msg.h"
#include "scoutfs_trace.h"
#include "alloc.h"
/*
* The scoutfs block cache manages metadata blocks that can be larger
@@ -719,9 +720,8 @@ void scoutfs_block_writer_init(struct super_block *sb,
* and allocate with an advancing cursor so we always dirty in block
 * offset order and can walk our list to submit nice ordered IO.
*/
void scoutfs_block_writer_mark_dirty(struct super_block *sb,
struct scoutfs_block_writer *wri,
struct scoutfs_block *bl)
static void block_mark_dirty(struct super_block *sb, struct scoutfs_block_writer *wri,
struct scoutfs_block *bl)
{
struct block_private *bp = BLOCK_PRIVATE(bl);
@@ -736,14 +736,119 @@ void scoutfs_block_writer_mark_dirty(struct super_block *sb,
}
}
bool scoutfs_block_writer_is_dirty(struct super_block *sb,
struct scoutfs_block *bl)
static bool block_is_dirty(struct block_private *bp)
{
struct block_private *bp = BLOCK_PRIVATE(bl);
return test_bit(BLOCK_BIT_DIRTY, &bp->bits) != 0;
}
/*
* Give the caller a dirty block that is pointed to by their ref.
*
* The ref may already refer to a cached dirty block. In that case the
* dirty block is returned.
*
* If the ref doesn't refer to a dirty block, then a new block is always
* allocated and returned. If the ref refers to an existing block then
* its contents are copied into the new block.
*
* If a new blkno is allocated then the ref is updated and any existing
* blkno is freed.
*
* The dirty_blkno and ref_blkno arguments are used by the metadata
* allocator to avoid recursing into itself. dirty_blkno provides the
* blkno of the new dirty block to avoid calling _alloc_meta and
* ref_blkno is set to the old blkno instead of freeing it with
* _free_meta.
*/
int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, struct scoutfs_block_ref *ref,
u32 magic, struct scoutfs_block **bl_ret,
u64 dirty_blkno, u64 *ref_blkno)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_block *cow_bl = NULL;
struct scoutfs_block *bl = NULL;
struct block_private *bp = NULL;
struct scoutfs_block_header *hdr;
bool undo_alloc = false;
u64 blkno;
int ret;
int err;
blkno = le64_to_cpu(ref->blkno);
if (blkno) {
ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
if (ret < 0)
goto out;
bl = *bl_ret;
bp = BLOCK_PRIVATE(bl);
if (block_is_dirty(bp)) {
ret = 0;
goto out;
}
}
if (dirty_blkno == 0) {
ret = scoutfs_alloc_meta(sb, alloc, wri, &dirty_blkno);
if (ret < 0)
goto out;
undo_alloc = true;
}
cow_bl = scoutfs_block_create(sb, dirty_blkno);
if (IS_ERR(cow_bl)) {
ret = PTR_ERR(cow_bl);
goto out;
}
if (ref_blkno) {
*ref_blkno = blkno;
} else if (blkno) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
if (ret < 0)
goto out;
}
if (bl)
memcpy(cow_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
else
memset(cow_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
bl = cow_bl;
cow_bl = NULL;
hdr = bl->data;
hdr->magic = cpu_to_le32(magic);
hdr->fsid = super->hdr.fsid;
hdr->blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
trace_scoutfs_block_dirty_ref(sb, le64_to_cpu(ref->blkno), le64_to_cpu(ref->seq),
le64_to_cpu(hdr->blkno), le64_to_cpu(hdr->seq));
ref->blkno = hdr->blkno;
ref->seq = hdr->seq;
block_mark_dirty(sb, wri, bl);
ret = 0;
out:
scoutfs_block_put(sb, cow_bl);
if (ret < 0 && undo_alloc) {
err = scoutfs_free_meta(sb, alloc, wri, dirty_blkno);
BUG_ON(err); /* inconsistent */
}
if (ret < 0) {
scoutfs_block_put(sb, bl);
bl = NULL;
}
*bl_ret = bl;
return ret;
}
/*
* Submit writes for all the dirty blocks in the writer's dirty list and
* wait for them to complete. The caller must serialize this with

src/block.h

@@ -20,11 +20,10 @@ void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
void scoutfs_block_writer_init(struct super_block *sb,
struct scoutfs_block_writer *wri);
void scoutfs_block_writer_mark_dirty(struct super_block *sb,
struct scoutfs_block_writer *wri,
struct scoutfs_block *bl);
bool scoutfs_block_writer_is_dirty(struct super_block *sb,
struct scoutfs_block *bl);
int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, struct scoutfs_block_ref *ref,
u32 magic, struct scoutfs_block **bl_ret,
u64 dirty_blkno, u64 *ref_blkno);
int scoutfs_block_writer_write(struct super_block *sb,
struct scoutfs_block_writer *wri);
void scoutfs_block_writer_forget_all(struct super_block *sb,

src/btree.c

@@ -80,7 +80,7 @@ enum btree_walk_flags {
BTW_NEXT = (1 << 0), /* return >= key */
BTW_PREV = (1 << 1), /* return <= key */
BTW_DIRTY = (1 << 2), /* cow stable blocks */
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref */
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref, requires dirty */
BTW_INSERT = (1 << 4), /* walking to insert, try splitting */
BTW_DELETE = (1 << 5), /* walking to delete, try joining */
};
@@ -628,102 +628,27 @@ static int get_ref_block(struct super_block *sb,
struct scoutfs_block_ref *ref,
struct scoutfs_block **bl_ret)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_btree_block *bt = NULL;
struct scoutfs_btree_block *new;
struct scoutfs_block *new_bl = NULL;
struct scoutfs_block *bl = NULL;
u64 blkno;
u64 seq;
int ret;
/* always get the current block, either to return or cow from */
if (ref && ref->blkno) {
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BTREE, &bl);
if (ret < 0) {
if (ret == -ESTALE)
scoutfs_inc_counter(sb, btree_stale_read);
goto out;
}
if (WARN_ON_ONCE((flags & BTW_ALLOC) && !(flags & BTW_DIRTY)))
return -EINVAL;
/*
* We need to create a new dirty copy of the block if
* the caller asked for it. If the block is already
* dirty then we can return it.
*/
if (!(flags & BTW_DIRTY) ||
scoutfs_block_writer_is_dirty(sb, bl)) {
ret = 0;
goto out;
}
} else if (!(flags & BTW_ALLOC)) {
if (ref->blkno == 0 && !(flags & BTW_ALLOC)) {
ret = -ENOENT;
goto out;
}
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
if (ret < 0)
goto out;
prandom_bytes(&seq, sizeof(seq));
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(ret);
ret = PTR_ERR(new_bl);
goto out;
}
new = (void *)new_bl->data;
/* free old stable blkno we're about to overwrite */
if (ref && ref->blkno) {
ret = scoutfs_free_meta(sb, alloc, wri,
le64_to_cpu(ref->blkno));
if (ret) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(ret);
scoutfs_block_put(sb, new_bl);
new_bl = NULL;
goto out;
}
}
scoutfs_block_writer_mark_dirty(sb, wri, new_bl);
trace_scoutfs_btree_dirty_block(sb, blkno, seq,
bt ? le64_to_cpu(bt->hdr.blkno) : 0,
bt ? le64_to_cpu(bt->hdr.seq) : 0);
if (bt) {
/* returning a cow of an existing block */
memcpy(new, bt, SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
} else {
/* returning a newly allocated block */
memset(new, 0, SCOUTFS_BLOCK_LG_SIZE);
new->hdr.fsid = super->hdr.fsid;
}
bl = new_bl;
bt = new;
bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
bt->hdr.blkno = cpu_to_le64(blkno);
bt->hdr.seq = cpu_to_le64(seq);
if (ref) {
ref->blkno = bt->hdr.blkno;
ref->seq = bt->hdr.seq;
}
ret = 0;
if (flags & BTW_DIRTY)
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_BTREE,
bl_ret, 0, NULL);
else
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BTREE, bl_ret);
out:
if (ret) {
scoutfs_block_put(sb, bl);
bl = NULL;
if (ret < 0) {
if (ret == -ESTALE)
scoutfs_inc_counter(sb, btree_stale_read);
}
*bl_ret = bl;
return ret;
}
@@ -803,6 +728,7 @@ static int try_split(struct super_block *sb,
struct scoutfs_block *par_bl = NULL;
struct scoutfs_btree_block *left;
struct scoutfs_key max_key;
struct scoutfs_block_ref zeros;
int ret;
int err;
@@ -820,7 +746,8 @@ static int try_split(struct super_block *sb,
scoutfs_inc_counter(sb, btree_split);
/* alloc split neighbour first to avoid unwinding tree growth */
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &left_bl);
memset(&zeros, 0, sizeof(zeros));
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &left_bl);
if (ret)
return ret;
left = left_bl->data;
@@ -828,7 +755,8 @@ static int try_split(struct super_block *sb,
init_btree_block(left, right->level);
if (!parent) {
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &par_bl);
memset(&zeros, 0, sizeof(zeros));
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &par_bl);
if (ret) {
err = scoutfs_free_meta(sb, alloc, wri,
le64_to_cpu(left->hdr.blkno));
@@ -1196,8 +1124,7 @@ restart:
if (!(flags & BTW_INSERT)) {
ret = -ENOENT;
} else {
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC,
&root->ref, &bl);
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &root->ref, &bl);
if (ret == 0) {
bt = bl->data;
init_btree_block(bt, 0);
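
A note on the reworked btree allocation path above: get_ref_block() no
longer takes a NULL ref for fresh blocks.  Callers zero a stack ref and
pass BTW_ALLOC | BTW_DIRTY, and scoutfs_block_dirty_ref() allocates a
new block because ref->blkno is 0 (a sketch following try_split()
above):

	struct scoutfs_block_ref zeros;

	memset(&zeros, 0, sizeof(zeros));
	/* BTW_ALLOC without BTW_DIRTY now hits the WARN_ON_ONCE and -EINVAL */
	ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &bl);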

src/forest.c

@@ -377,18 +377,14 @@ out:
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
struct scoutfs_lock *lock)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
DECLARE_FOREST_INFO(sb, finf);
struct scoutfs_block *new_bl = NULL;
struct scoutfs_block *bl = NULL;
struct scoutfs_bloom_block *bb;
struct scoutfs_block_ref *ref;
struct forest_bloom_nrs bloom;
int nr_set = 0;
u64 blkno;
u64 nr;
int ret;
int err;
int i;
nr = le64_to_cpu(finf->our_log.nr);
@@ -406,53 +402,11 @@ int scoutfs_forest_set_bloom_bits(struct super_block *sb,
ref = &finf->our_log.bloom_ref;
if (ref->blkno) {
bl = read_bloom_ref(sb, ref);
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
goto unlock;
}
bb = bl->data;
}
if (!ref->blkno || !scoutfs_block_writer_is_dirty(sb, bl)) {
ret = scoutfs_alloc_meta(sb, finf->alloc, finf->wri, &blkno);
if (ret < 0)
goto unlock;
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
blkno);
BUG_ON(err); /* could have dirtied */
ret = PTR_ERR(new_bl);
goto unlock;
}
if (bl) {
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
le64_to_cpu(ref->blkno));
BUG_ON(err); /* could have dirtied */
memcpy(new_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
} else {
memset(new_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
}
scoutfs_block_writer_mark_dirty(sb, finf->wri, new_bl);
scoutfs_block_put(sb, bl);
bl = new_bl;
bb = bl->data;
new_bl = NULL;
bb->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BLOOM);
bb->hdr.fsid = super->hdr.fsid;
bb->hdr.blkno = cpu_to_le64(blkno);
prandom_bytes(&bb->hdr.seq, sizeof(bb->hdr.seq));
ref->blkno = bb->hdr.blkno;
ref->seq = bb->hdr.seq;
}
ret = scoutfs_block_dirty_ref(sb, finf->alloc, finf->wri, ref, SCOUTFS_BLOCK_MAGIC_BLOOM,
&bl, 0, NULL);
if (ret < 0)
goto unlock;
bb = bl->data;
for (i = 0; i < ARRAY_SIZE(bloom.nrs); i++) {
if (!test_and_set_bit_le(bloom.nrs[i], bb->bits)) {

src/scoutfs_trace.h

@@ -1631,33 +1631,6 @@ TRACE_EVENT(scoutfs_btree_read_error,
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq)
);
TRACE_EVENT(scoutfs_btree_dirty_block,
TP_PROTO(struct super_block *sb, u64 blkno, u64 seq,
u64 bt_blkno, u64 bt_seq),
TP_ARGS(sb, blkno, seq, bt_blkno, bt_seq),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, blkno)
__field(__u64, seq)
__field(__u64, bt_blkno)
__field(__u64, bt_seq)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->blkno = blkno;
__entry->seq = seq;
__entry->bt_blkno = bt_blkno;
__entry->bt_seq = bt_seq;
),
TP_printk(SCSBF" blkno %llu seq %llu bt_blkno %llu bt_seq %llu",
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq,
__entry->bt_blkno, __entry->bt_seq)
);
TRACE_EVENT(scoutfs_btree_walk,
TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
struct scoutfs_key *key, int flags, int level,
@@ -2056,6 +2029,33 @@ TRACE_EVENT(scoutfs_forest_init_our_log,
__entry->blkno, __entry->seq)
);
TRACE_EVENT(scoutfs_block_dirty_ref,
TP_PROTO(struct super_block *sb, u64 ref_blkno, u64 ref_seq,
u64 block_blkno, u64 block_seq),
TP_ARGS(sb, ref_blkno, ref_seq, block_blkno, block_seq),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ref_blkno)
__field(__u64, ref_seq)
__field(__u64, block_blkno)
__field(__u64, block_seq)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ref_blkno = ref_blkno;
__entry->ref_seq = ref_seq;
__entry->block_blkno = block_blkno;
__entry->block_seq = block_seq;
),
TP_printk(SCSBF" ref_blkno %llu ref_seq %llu block_blkno %llu block_seq %llu",
SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq,
__entry->block_blkno, __entry->block_seq)
);
DECLARE_EVENT_CLASS(scoutfs_block_class,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
unsigned long bits, __u64 accessed),

src/srch.c

@@ -255,24 +255,9 @@ static u8 height_for_blk(u64 blk)
return hei;
}
static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
int level)
static inline u32 srch_level_magic(int level)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_block_header *hdr;
/* don't leak uninit kernel mem.. block should do this for us? */
memset(bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
hdr = bl->data;
hdr->fsid = super->hdr.fsid;
hdr->blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
if (level)
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_PARENT);
else
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK);
return level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT : SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
}
/*
@@ -287,7 +272,7 @@ static int read_srch_block(struct super_block *sb,
struct scoutfs_block_ref *ref,
struct scoutfs_block **bl_ret)
{
u32 magic = level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT : SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
u32 magic = srch_level_magic(level);
int ret;
ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
@@ -368,12 +353,10 @@ static int get_file_block(struct super_block *sb,
struct scoutfs_block_header *hdr;
struct scoutfs_block *bl = NULL;
struct scoutfs_srch_parent *srp;
struct scoutfs_block *new_bl;
struct scoutfs_block_ref new_root_ref;
struct scoutfs_block_ref *ref;
u64 blkno = 0;
int level;
int ind;
int err;
int ret;
u8 hei;
@@ -385,29 +368,21 @@ static int get_file_block(struct super_block *sb,
goto out;
}
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
memset(&new_root_ref, 0, sizeof(new_root_ref));
level = sfl->height;
ret = scoutfs_block_dirty_ref(sb, alloc, wri, &new_root_ref,
srch_level_magic(level), &bl, 0, NULL);
if (ret < 0)
goto out;
bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
goto out;
}
blkno = 0;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
init_file_block(sb, bl, sfl->height);
if (sfl->height) {
if (level) {
srp = bl->data;
srp->refs[0].blkno = sfl->ref.blkno;
srp->refs[0].seq = sfl->ref.seq;
srp->refs[0] = sfl->ref;
}
hdr = bl->data;
sfl->ref.blkno = hdr->blkno;
sfl->ref.seq = hdr->seq;
sfl->ref = new_root_ref;
sfl->height++;
scoutfs_block_put(sb, bl);
bl = NULL;
@@ -423,54 +398,13 @@ static int get_file_block(struct super_block *sb,
goto out;
}
/* read an existing block */
if (ref->blkno) {
ret = read_srch_block(sb, wri, level, ref, &bl);
if (ret < 0)
goto out;
}
/* allocate a new block if we need it */
if (!ref->blkno || ((flags & GFB_DIRTY) &&
!scoutfs_block_writer_is_dirty(sb, bl))) {
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
if (ret < 0)
goto out;
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
ret = PTR_ERR(new_bl);
goto out;
}
if (bl) {
/* cow old block if we have one */
ret = scoutfs_free_meta(sb, alloc, wri,
bl->blkno);
if (ret)
goto out;
memcpy(new_bl->data, bl->data,
SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
bl = new_bl;
hdr = bl->data;
hdr->blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
} else {
/* init new allocated block */
bl = new_bl;
init_file_block(sb, bl, level);
}
blkno = 0;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
/* update file or parent block ref */
hdr = bl->data;
ref->blkno = hdr->blkno;
ref->seq = hdr->seq;
}
if (flags & GFB_DIRTY)
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, srch_level_magic(level),
&bl, 0, NULL);
else
ret = scoutfs_block_read_ref(sb, ref, srch_level_magic(level), &bl);
if (ret < 0)
goto out;
if (level == 0) {
ret = 0;
@@ -490,12 +424,6 @@ static int get_file_block(struct super_block *sb,
out:
scoutfs_block_put(sb, parent);
/* return allocated blkno on error */
if (blkno > 0) {
err = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(err); /* radix should have been dirty */
}
if (ret < 0) {
scoutfs_block_put(sb, bl);
bl = NULL;