scoutfs: add support for statfs

To do a credible job of this we need to track the number of free blocks.
We add per-order counters of free allocations to the indirect blocks so
that we can quickly scan them.  We also need a bit of help to count inodes.

Finally I noticed that we were miscalculating the number of slots in the
indirect blocks because we were using the size of the buddy block
header, not the size of the indirect block header.

Signed-off-by: Zach Brown <zab@versity.com>
This commit is contained in:
Zach Brown
2016-08-24 15:52:54 -07:00
parent c90710d26b
commit cb318982c9
6 changed files with 124 additions and 17 deletions

View File

@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/statfs.h>
#include "super.h"
#include "format.h"
@@ -164,16 +165,22 @@ static int test_buddy_bit_or_higher(struct scoutfs_buddy_block *bud, int order,
return false;
}
static void set_buddy_bit(struct scoutfs_buddy_block *bud, int order, int nr)
static void set_buddy_bit(struct scoutfs_buddy_indirect *ind,
struct scoutfs_buddy_block *bud, int order, int nr)
{
if (!test_and_set_bit_le(order_nr(order, nr), bud->bits))
if (!test_and_set_bit_le(order_nr(order, nr), bud->bits)) {
le64_add_cpu(&ind->order_totals[order], 1);
le32_add_cpu(&bud->order_counts[order], 1);
}
}
static void clear_buddy_bit(struct scoutfs_buddy_block *bud, int order, int nr)
static void clear_buddy_bit(struct scoutfs_buddy_indirect *ind,
struct scoutfs_buddy_block *bud, int order, int nr)
{
if (test_and_clear_bit_le(order_nr(order, nr), bud->bits))
if (test_and_clear_bit_le(order_nr(order, nr), bud->bits)) {
le64_add_cpu(&ind->order_totals[order], -1);
le32_add_cpu(&bud->order_counts[order], -1);
}
}
/* returns INT_MAX when there are no bits set */
@@ -289,8 +296,10 @@ static int bitmap_free(struct super_block *sb, u64 blkno)
* Give the caller a dirty buddy block. If the slot hasn't been used
* yet then we need to allocate and initialize a new block.
*/
static struct buffer_head *dirty_buddy_block(struct super_block *sb, int sl,
struct scoutfs_buddy_slot *slot)
static struct buffer_head *dirty_buddy_block(struct super_block *sb,
struct scoutfs_buddy_indirect *ind,
int sl,
struct scoutfs_buddy_slot *slot)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
@@ -325,7 +334,7 @@ static struct buffer_head *dirty_buddy_block(struct super_block *sb, int sl,
size = 1 << order;
nr = 0;
while (count > size) {
set_buddy_bit(bud, order, nr);
set_buddy_bit(ind, bud, order, nr);
nr++;
count -= size;
}
@@ -333,7 +342,7 @@ static struct buffer_head *dirty_buddy_block(struct super_block *sb, int sl,
/* set order bits for each of the bits set in the remaining count */
do {
if (count & (1 << order)) {
set_buddy_bit(bud, order, nr);
set_buddy_bit(ind, bud, order, nr);
nr = (nr + 1) << 1;
} else {
nr <<= 1;
@@ -405,7 +414,8 @@ static int find_first_fit(struct scoutfs_super_block *super, int sl,
* that breaks up a larger order. Higher level callers iterate over
* smaller orders to provide partial allocations.
*/
static int alloc_slot(struct super_block *sb, int sl,
static int alloc_slot(struct super_block *sb,
struct scoutfs_buddy_indirect *ind, int sl,
struct scoutfs_buddy_slot *slot,
struct scoutfs_block_ref *stable_ref,
u64 *blkno, int order)
@@ -422,7 +432,7 @@ static int alloc_slot(struct super_block *sb, int sl,
int i;
/* initialize or dirty the slot's buddy block */
bh = dirty_buddy_block(sb, sl, slot);
bh = dirty_buddy_block(sb, ind, sl, slot);
if (IS_ERR(bh))
return PTR_ERR(bh);
bud = bh_data(bh);
@@ -450,11 +460,11 @@ static int alloc_slot(struct super_block *sb, int sl,
*blkno = slot_buddy_blkno(super, sl, found, nr);
/* always clear the found order */
clear_buddy_bit(bud, found, nr);
clear_buddy_bit(ind, bud, found, nr);
/* free right buddies if we're breaking up a larger order */
for (nr <<= 1, i = found - 1; i >= order; i--, nr <<= 1)
set_buddy_bit(bud, i, nr | 1);
set_buddy_bit(ind, bud, i, nr | 1);
update_free_orders(slot, bud);
ret = 0;
@@ -524,8 +534,8 @@ static int alloc_order(struct super_block *sb, u64 *blkno, int order)
continue;
}
ret = alloc_slot(sb, i, &ind->slots[i], &st_ind->slots[i].ref,
blkno, order);
ret = alloc_slot(sb, ind, i, &ind->slots[i],
&st_ind->slots[i].ref, blkno, order);
if (ret != -ENOSPC)
break;
}
@@ -664,11 +674,11 @@ static int buddy_free(struct super_block *sb, u64 blkno, int order)
if (!test_buddy_bit(bud, i, nr ^ 1))
break;
clear_buddy_bit(bud, i, nr ^ 1);
clear_buddy_bit(ind, bud, i, nr ^ 1);
nr >>= 1;
}
set_buddy_bit(bud, i, nr);
set_buddy_bit(ind, bud, i, nr);
update_free_orders(&ind->slots[sl], bud);
scoutfs_block_put(bh);
@@ -803,3 +813,38 @@ out:
trace_printk("blkno %llu order %d ret %d\n", blkno, order, ret);
return ret;
}
/*
 * Sum the number of free blocks by walking the per-order free counts in
 * the indirect block.  Each order contributes (count << order) blocks.
 *
 * For now we only have one indirect block off the super.  When we grow
 * multiple commit block pairs that reference root and indirect blocks
 * then we'll need to iterate over those.  These results will only ever
 * be approximate so we can simply use racey valid ref reads to be able
 * to sample while others are writing.
 */
int scoutfs_buddy_bfree(struct super_block *sb, u64 *bfree)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	struct scoutfs_buddy_indirect *ind;
	struct buffer_head *bh;
	int order;

	*bfree = 0;

	bh = scoutfs_block_read_ref(sb, &super->buddy_ind_ref);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	ind = bh_data(bh);
	for (order = 0; order < SCOUTFS_BUDDY_ORDERS; order++)
		*bfree += le64_to_cpu(ind->order_totals[order]) << order;
	scoutfs_block_put(bh);

	return 0;
}

View File

@@ -8,5 +8,6 @@ int scoutfs_buddy_free(struct super_block *sb, u64 blkno, int order);
void scoutfs_buddy_free_extent(struct super_block *sb, u64 blkno, u64 count);
int scoutfs_buddy_was_free(struct super_block *sb, u64 blkno, int order);
int scoutfs_buddy_bfree(struct super_block *sb, u64 *bfree);
#endif

View File

@@ -72,6 +72,7 @@ struct scoutfs_buddy_block {
struct scoutfs_buddy_indirect {
struct scoutfs_block_header hdr;
__le64 order_totals[SCOUTFS_BUDDY_ORDERS];
struct scoutfs_buddy_slot {
__u8 free_orders;
struct scoutfs_block_ref ref;
@@ -79,7 +80,7 @@ struct scoutfs_buddy_indirect {
} __packed;
#define SCOUTFS_BUDDY_SLOTS \
((SCOUTFS_BLOCK_SIZE - sizeof(struct scoutfs_buddy_block)) / \
((SCOUTFS_BLOCK_SIZE - sizeof(struct scoutfs_buddy_indirect)) / \
sizeof(struct scoutfs_buddy_slot))
/*

View File

@@ -263,6 +263,22 @@ void scoutfs_update_inode_item(struct inode *inode)
trace_scoutfs_update_inode(inode);
}
/*
 * Take a quick, locked sample of the last inode number that has been
 * allocated.  next_ino is protected by next_ino_lock, so holding it
 * briefly gives us an atomic snapshot relative to allocators.
 */
u64 scoutfs_last_ino(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	u64 sampled;

	spin_lock(&sbi->next_ino_lock);
	sampled = le64_to_cpu(super->next_ino);
	spin_unlock(&sbi->next_ino_lock);

	return sampled;
}
static int alloc_ino(struct super_block *sb, u64 *ino)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

View File

@@ -30,6 +30,8 @@ void scoutfs_update_inode_item(struct inode *inode);
struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode, dev_t rdev);
u64 scoutfs_last_ino(struct super_block *sb);
void scoutfs_inode_exit(void);
int scoutfs_inode_init(void);

View File

@@ -17,6 +17,7 @@
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/statfs.h>
#include "super.h"
#include "format.h"
@@ -27,14 +28,55 @@
#include "block.h"
#include "counters.h"
#include "trans.h"
#include "buddy.h"
#include "scoutfs_trace.h"
static struct kset *scoutfs_kset;
/*
 * We fake the number of free inodes value by assuming that we can fill
 * free blocks with a certain number of inodes.  We then add the number
 * of current inodes to that free count to determine the total possible
 * inodes.
 *
 * The fsid that we report is constructed from the xor of the first two
 * and second two little endian u32s that make up the uuid bytes.
 */
static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	/* uuid is a packed byte array; __packed pointer allows unaligned loads */
	__le32 * __packed uuid = (void *)super->uuid;
	int ret;

	/* sampled from the per-order free counts; approximate by design */
	ret = scoutfs_buddy_bfree(sb, &kst->f_bfree);
	if (ret)
		return ret;

	kst->f_type = SCOUTFS_SUPER_MAGIC;
	kst->f_bsize = SCOUTFS_BLOCK_SIZE;
	kst->f_blocks = le64_to_cpu(super->total_blocks);
	kst->f_bavail = kst->f_bfree;
	/* fake inode counts: assume 17 inodes fit per free block */
	kst->f_ffree = kst->f_bfree * 17;
	kst->f_files = kst->f_ffree + scoutfs_last_ino(sb);

	/* this fsid is constant.. the uuid is different */
	kst->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[1]);
	kst->f_fsid.val[1] = le32_to_cpu(uuid[2]) ^ le32_to_cpu(uuid[3]);
	kst->f_namelen = SCOUTFS_NAME_LEN;
	kst->f_frsize = SCOUTFS_BLOCK_SIZE;
	/* the vfs fills f_flags */

	return 0;
}
/* VFS super_operations table; statfs is newly wired up here. */
static const struct super_operations scoutfs_super_ops = {
	.alloc_inode = scoutfs_alloc_inode,
	.destroy_inode = scoutfs_destroy_inode,
	.sync_fs = scoutfs_sync_fs,
	.statfs = scoutfs_statfs,
};
/*