Mirror of https://github.com/versity/scoutfs.git, synced 2026-01-09 13:23:14 +00:00
scoutfs: free source blkno after cow
As we update references to point to newly allocated dirty blocks in a transaction, we need to free the old referenced blknos. By using a two-phase dirty/free interface we avoid having a free fail after we've made it through stages of the cow processing that can't be easily undone.

Signed-off-by: Zach Brown <zab@versity.com>
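The two-phase ordering described above can be illustrated with a minimal userspace sketch. Everything below (toy_alloc, toy_dirty, toy_free, the block number) is invented for illustration and is not scoutfs code; the point is only the ordering, where the step that can fail runs before the work that is hard to unwind, so the final free cannot fail. In the patch below, scoutfs_buddy_dirty() plays the role of the first phase and scoutfs_buddy_free() the second.

/*
 * Toy userspace illustration of the two-phase dirty/free idea; not
 * scoutfs code.  Everything that can fail (reserving the allocator
 * metadata for the eventual free) happens up front, before the
 * hard-to-undo cow work, so the free at the end cannot fail.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_alloc {
        bool reserved;          /* phase one completed for this free */
};

/* phase one: may fail, but nothing irreversible has happened yet */
static int toy_dirty(struct toy_alloc *a)
{
        a->reserved = true;
        return 0;
}

/* phase two: only called after toy_dirty() succeeded, cannot fail */
static void toy_free(struct toy_alloc *a, unsigned long blkno)
{
        if (a->reserved)
                printf("freed stable block %lu\n", blkno);
}

int main(void)
{
        struct toy_alloc a = { .reserved = false };
        unsigned long old_blkno = 1234;   /* stands in for the clean blkno */

        /* 1) dirty the allocator state while failure is still easy to undo */
        if (toy_dirty(&a))
                return 1;

        /*
         * 2) ... cow processing that is awkward to unwind would go here:
         *    allocating the new block, copying contents, swapping refs ...
         */

        /* 3) the free of the old block is now guaranteed to succeed */
        toy_free(&a, old_blkno);
        return 0;
}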
@@ -434,6 +434,7 @@ static struct scoutfs_block *dirty_ref(struct super_block *sb,
        struct scoutfs_block *found;
        struct scoutfs_block *bl;
        unsigned long flags;
        u64 clean_blkno;
        u64 blkno = 0;
        int ret;
        int err;

@@ -442,6 +443,12 @@ static struct scoutfs_block *dirty_ref(struct super_block *sb,
        if (IS_ERR(bl) || ref->seq == sbi->super.hdr.seq)
                return bl;

        clean_blkno = bl->blkno;

        ret = scoutfs_buddy_dirty(sb, clean_blkno, 0);
        if (ret < 0)
                goto out;

        ret = scoutfs_buddy_alloc_same(sb, &blkno, 0, le64_to_cpu(ref->blkno));
        if (ret < 0)
                goto out;

@@ -498,6 +505,9 @@ static struct scoutfs_block *dirty_ref(struct super_block *sb,
        spin_unlock_irqrestore(&sbi->block_lock, flags);
        radix_tree_preload_end();

        /* free clean blkno after preload end enables preemption */
        err = scoutfs_buddy_free(sb, clean_blkno, 0);
        WARN_ON(err); /* XXX corruption (dirtying should prevent) */
        ret = 0;
out:
        scoutfs_put_block(copy_bl);
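
For context on the "free clean blkno after preload end enables preemption" comment: the kernel's radix_tree_preload() returns with preemption disabled and radix_tree_preload_end() re-enables it, so work that may sleep has to wait until the preload section is over. A generic sketch of that preload pattern follows; the function and lock names are invented for illustration and it is not taken from scoutfs.

/* Generic radix-tree preload pattern (illustrative, not scoutfs code). */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(example_lock);

static int example_insert_then_sleepable_work(struct radix_tree_root *root,
                                              unsigned long index, void *item)
{
        int ret;

        /* fills per-cpu node pools; returns with preemption disabled */
        ret = radix_tree_preload(GFP_NOFS);
        if (ret)
                return ret;

        spin_lock(&example_lock);
        ret = radix_tree_insert(root, index, item);
        spin_unlock(&example_lock);

        /* re-enables preemption; sleeping calls are safe from here on */
        radix_tree_preload_end();

        return ret;
}
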
@@ -63,9 +63,7 @@
 * - shrink and grow
 * - metadata and data regions
 * - worry about testing for free buddies outside device during free?
 * - scoutfs_dirty_ref should call us to free old stable
 * - btree should free blocks on merge and some failure
 * - might want to add a alloc predirty call to avoid error unwind failure
 * - we could track the first set in order bitmaps, dunno if it'd be worth it
 */

@@ -597,6 +595,89 @@ int scoutfs_buddy_alloc(struct super_block *sb, u64 *blkno, int order)
        return alloc_region(sb, blkno, order, 0, REGION_BUDDY);
}

static int bitmap_dirty(struct super_block *sb, u64 blkno)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_block *bl;

        /* mkfs should have ensured that there's bitmap blocks */
        /* XXX corruption */
        if (sbi->super.buddy_bm_ref.blkno == 0)
                return -EIO;

        /* dirty the bitmap block */
        bl = scoutfs_block_cow_ref(sb, &sbi->super.buddy_bm_ref);
        if (IS_ERR(bl))
                return PTR_ERR(bl);

        scoutfs_put_block(bl);
        return 0;
}

static int buddy_dirty(struct super_block *sb, u64 blkno, int order)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;
        struct scoutfs_buddy_indirect *ind;
        struct scoutfs_block *ind_bl = NULL;
        struct scoutfs_block *bl = NULL;
        int ret;
        int sl;

        mutex_lock(&sbi->buddy_mutex);

        /* mkfs should have ensured that there's indirect blocks */
        if (sbi->super.buddy_ind_ref.blkno == 0) {
                ret = -EIO;
                goto out;
        }

        /* get the dirty indirect block */
        ind_bl = scoutfs_block_cow_ref(sb, &sbi->super.buddy_ind_ref);
        if (IS_ERR(ind_bl)) {
                ret = PTR_ERR(ind_bl);
                goto out;
        }
        ind = ind_bl->data;

        sl = indirect_slot(super, blkno);
        bl = dirty_buddy_block(sb, sl, &ind->slots[sl]);
        if (IS_ERR(bl))
                ret = PTR_ERR(bl);
        else
                ret = 0;
out:
        mutex_unlock(&sbi->buddy_mutex);
        scoutfs_put_block(ind_bl);
        scoutfs_put_block(bl);

        return ret;
}


/*
 * Create dirty cow copies of the bitmap, indirect, and buddy blocks
 * so that a free of the given extent in the current transaction is
 * guaranteed to succeed.
 *
 * This is only meant for buddy allocators who are complicated enough
 * to need help avoiding error conditions.
 */
int scoutfs_buddy_dirty(struct super_block *sb, u64 blkno, int order)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;

        switch(blkno_region(super, blkno)) {
        case REGION_BM:
                return bitmap_dirty(sb, blkno);
        case REGION_BUDDY:
                return buddy_dirty(sb, blkno, order);
        }

        return 0;
}

/*
 * The block layer allocates from the same region as an existing blkno
 * when it's allocating for cow.

@@ -4,6 +4,7 @@
int scoutfs_buddy_alloc(struct super_block *sb, u64 *blkno, int order);
int scoutfs_buddy_alloc_same(struct super_block *sb, u64 *blkno, int order,
                             u64 existing);
int scoutfs_buddy_dirty(struct super_block *sb, u64 blkno, int order);
int scoutfs_buddy_free(struct super_block *sb, u64 blkno, int order);

#endif