Compare commits


2 Commits

Author SHA1 Message Date
Ben McClelland 0b521a943e dont mark skipped tests as failure 2021-01-15 10:45:57 -07:00
Ben McClelland 36a3f04566 skip xfstests instead of fail if repo not specified 2021-01-15 10:45:57 -07:00
86 changed files with 3559 additions and 5872 deletions

View File

@@ -31,9 +31,15 @@ functionality hasn't been implemented. It's appropriate for early
adopters and interested developers, not for production use.
In that vein, expect significant incompatible changes to both the format
of network messages and persistent structures. Since the format hash-checking
has now been removed in preparation for release, if there is any doubt, mkfs
is strongly recommended.
of network messages and persistent structures. To avoid mistakes the
implementation currently calculates a hash of the format and ioctl
header files in the source tree. The kernel module will refuse to mount
a volume created by userspace utilities with a mismatched hash, and it
will refuse to connect to a remote node with a mismatched hash. This
means having to unmount, mkfs, and remount everything across many
functional changes. Once the format is nailed down we'll wire up
forward and back compat machinery and remove this temporary safety
measure.
The current kernel module is developed against the RHEL/CentOS 7.x
kernel to minimize the friction of developing and testing with partners'
@@ -65,13 +71,8 @@ The steps for getting scoutfs mounted and operational are:
2. Make a new filesystem on the devices with the userspace utilities
3. Mount the devices on all the nodes
In this example we use three nodes. The names of the block devices are
the same on all the nodes. Two of the nodes will be quorum members. A
majority of quorum members must be mounted to elect a leader to run a
server that all the mounts connect to. It should be noted that two
quorum members results in a majority of one, each member itself, so
split brain elections are possible but so unlikely that it's fine for a
demonstration.
In this example we run all of these commands on three nodes. The names
of the block devices are the same on all the nodes.
1. Get the Kernel Module and Userspace Binaries
@@ -93,30 +94,24 @@ demonstration.
alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
```
2. Make a New Filesystem (**destroys contents**)
2. Make a New Filesystem (**destroys contents, no questions asked**)
We specify quorum slots with the addresses of each of the quorum
member nodes, the metadata device, and the data device.
We specify that two of our three nodes must be present to form a
quorum for the system to function.
```shell
scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
scoutfs mkfs -Q 2 /dev/meta_dev /dev/data_dev
```
3. Mount the Filesystem
First, mount each of the quorum nodes so that they can elect and
start a server for the remaining node to connect to. The slot numbers
were specified with the leading "0,..." and "1,..." in the mkfs options
above.
Each mounting node provides its local IP address on which it will run
an internal server for the other mounts if it is elected the leader by
the quorum.
```shell
mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```
Then mount the remaining node which can now connect to the running server.
```shell
mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
mkdir /mnt/scoutfs
mount -t scoutfs -o server_addr=$NODE_ADDR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```
4. For Kicks, Observe the Metadata Change Index
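The format-hash safety measure described in the README hunk above shows up in the build and client hunks later in this diff: the Makefile md5sums src/format.h and src/ioctl.h, Kbuild bakes the result in as the SCOUTFS_FORMAT_HASH constant, and the greeting exchange compares hashes across the wire. As a rough sketch of the mount-side refusal (the helper name here is hypothetical; the format_hash super field and the scoutfs_warn message format are taken from the client.c hunk below):

```c
/* Sketch only: a hypothetical mount-time check. SCOUTFS_FORMAT_HASH is
 * the compile-time constant defined in the Kbuild hunk; format_hash is
 * the field mkfs stamps into the super block, per the client.c hunk. */
static int check_format_hash(struct super_block *sb,
			     struct scoutfs_super_block *super)
{
	if (le64_to_cpu(super->format_hash) != SCOUTFS_FORMAT_HASH) {
		scoutfs_warn(sb, "super has format 0x%llx, module has 0x%llx",
			     le64_to_cpu(super->format_hash),
			     (u64)SCOUTFS_FORMAT_HASH);
		return -EINVAL; /* refuse to mount a mismatched volume */
	}
	return 0;
}
```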

View File

@@ -16,7 +16,11 @@ SCOUTFS_GIT_DESCRIBE := \
$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
echo no-git)
SCOUTFS_FORMAT_HASH := \
$(shell cat src/format.h src/ioctl.h | md5sum | cut -b1-16)
SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
SCOUTFS_FORMAT_HASH=$(SCOUTFS_FORMAT_HASH) \
CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
EXTRA_CFLAGS="-Werror"

View File

@@ -1,6 +1,7 @@
obj-$(CONFIG_SCOUTFS_FS) := scoutfs.o
CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\"
CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\" \
-DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU
CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
@@ -27,11 +28,9 @@ scoutfs-y += \
lock_server.o \
msg.o \
net.o \
omap.o \
options.o \
per_task.o \
quorum.o \
recov.o \
scoutfs_trace.o \
server.o \
sort_priv.o \

View File

@@ -252,7 +252,7 @@ void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
{
memset(alloc, 0, sizeof(struct scoutfs_alloc));
seqlock_init(&alloc->seqlock);
spin_lock_init(&alloc->lock);
mutex_init(&alloc->mutex);
alloc->avail = *avail;
alloc->freed = *freed;
@@ -358,24 +358,31 @@ static void list_block_sort(struct scoutfs_alloc_list_block *lblk)
/*
* We're always reading blocks that we own, so we shouldn't see stale
* references but we could retry reads after dropping stale cached
* blocks. If we do see a stale error then we've hit persistent
* corruption.
* references. But the cached block can be stale and we may need to
* invalidate it.
*/
static int read_list_block(struct super_block *sb, struct scoutfs_block_ref *ref,
static int read_list_block(struct super_block *sb,
struct scoutfs_alloc_list_ref *ref,
struct scoutfs_block **bl_ret)
{
int ret;
struct scoutfs_block *bl = NULL;
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, bl_ret);
if (ret < 0) {
if (ret == -ESTALE) {
scoutfs_inc_counter(sb, alloc_stale_list_block);
ret = -EIO;
}
}
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (!IS_ERR_OR_NULL(bl) &&
!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
SCOUTFS_BLOCK_MAGIC_ALLOC_LIST)) {
scoutfs_inc_counter(sb, alloc_stale_cached_list_block);
scoutfs_block_invalidate(sb, bl);
scoutfs_block_put(sb, bl);
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
}
if (IS_ERR(bl)) {
*bl_ret = NULL;
return PTR_ERR(bl);
}
return ret;
*bl_ret = bl;
return 0;
}
/*
@@ -389,12 +396,86 @@ static int read_list_block(struct super_block *sb, struct scoutfs_block_ref *ref
static int dirty_list_block(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_block_ref *ref,
struct scoutfs_alloc_list_ref *ref,
u64 dirty, u64 *old,
struct scoutfs_block **bl_ret)
{
return scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST,
bl_ret, dirty, old);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_block *cow_bl = NULL;
struct scoutfs_block *bl = NULL;
struct scoutfs_alloc_list_block *lblk;
bool undo_alloc = false;
u64 blkno;
int ret;
int err;
blkno = le64_to_cpu(ref->blkno);
if (blkno) {
ret = read_list_block(sb, ref, &bl);
if (ret < 0)
goto out;
if (scoutfs_block_writer_is_dirty(sb, bl)) {
ret = 0;
goto out;
}
}
if (dirty == 0) {
ret = scoutfs_alloc_meta(sb, alloc, wri, &dirty);
if (ret < 0)
goto out;
undo_alloc = true;
}
cow_bl = scoutfs_block_create(sb, dirty);
if (IS_ERR(cow_bl)) {
ret = PTR_ERR(cow_bl);
goto out;
}
if (old) {
*old = blkno;
} else if (blkno) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
if (ret < 0)
goto out;
}
if (bl)
memcpy(cow_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
else
memset(cow_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
bl = cow_bl;
cow_bl = NULL;
lblk = bl->data;
lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
lblk->hdr.fsid = super->hdr.fsid;
lblk->hdr.blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&lblk->hdr.seq, sizeof(lblk->hdr.seq));
ref->blkno = lblk->hdr.blkno;
ref->seq = lblk->hdr.seq;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
ret = 0;
out:
scoutfs_block_put(sb, cow_bl);
if (ret < 0 && undo_alloc) {
err = scoutfs_free_meta(sb, alloc, wri, dirty);
BUG_ON(err); /* inconsistent */
}
if (ret < 0) {
scoutfs_block_put(sb, bl);
bl = NULL;
}
*bl_ret = bl;
return ret;
}
/* Allocate a new dirty list block if we fill up more than 3/4 of the block. */
@@ -416,7 +497,7 @@ static int dirty_alloc_blocks(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri)
{
struct scoutfs_block_ref orig_freed;
struct scoutfs_alloc_list_ref orig_freed;
struct scoutfs_alloc_list_block *lblk;
struct scoutfs_block *av_bl = NULL;
struct scoutfs_block *fr_bl = NULL;
@@ -526,8 +607,7 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
if (ret < 0)
goto out;
write_seqlock(&alloc->seqlock);
spin_lock(&alloc->lock);
lblk = alloc->dirty_avail_bl->data;
if (WARN_ON_ONCE(lblk->nr == 0)) {
/* shouldn't happen, transaction should commit first */
@@ -537,8 +617,7 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
list_block_remove(&alloc->avail, lblk, 1);
ret = 0;
}
write_sequnlock(&alloc->seqlock);
spin_unlock(&alloc->lock);
out:
if (ret < 0)
@@ -561,8 +640,7 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
if (ret < 0)
goto out;
write_seqlock(&alloc->seqlock);
spin_lock(&alloc->lock);
lblk = alloc->dirty_freed_bl->data;
if (WARN_ON_ONCE(list_block_space(lblk->nr) == 0)) {
/* shouldn't happen, transaction should commit first */
@@ -571,8 +649,7 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
list_block_add(&alloc->freed, lblk, blkno);
ret = 0;
}
write_sequnlock(&alloc->seqlock);
spin_unlock(&alloc->lock);
out:
scoutfs_inc_counter(sb, alloc_free_meta);
@@ -693,13 +770,8 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
ret = 0;
out:
if (ret < 0) {
/*
* Special retval meaning there wasn't space to alloc from
* this txn. Doesn't mean filesystem is completely full.
* Maybe upper layers want to try again.
*/
if (ret == -ENOENT)
ret = -ENOBUFS;
ret = -ENOSPC;
*blkno_ret = 0;
*count_ret = 0;
} else {
@@ -1029,7 +1101,7 @@ int scoutfs_alloc_splice_list(struct super_block *sb,
struct scoutfs_alloc_list_head *src)
{
struct scoutfs_alloc_list_block *lblk;
struct scoutfs_block_ref *ref;
struct scoutfs_alloc_list_ref *ref;
struct scoutfs_block *prev = NULL;
struct scoutfs_block *bl = NULL;
int ret = 0;
@@ -1070,23 +1142,17 @@ out:
/*
* Returns true if meta avail and free don't have room for the given
* number of allocations or frees. This is called at a significantly
* higher frequency than allocations as writers try to enter
* transactions. This is the only reader of the seqlock which gives
* read-mostly sampling instead of bouncing a spinlock around all the
* cores.
* number of allocations or frees.
*/
bool scoutfs_alloc_meta_low(struct super_block *sb,
struct scoutfs_alloc *alloc, u32 nr)
{
unsigned int seq;
bool lo;
do {
seq = read_seqbegin(&alloc->seqlock);
lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
list_block_space(alloc->freed.first_nr) < nr;
} while (read_seqretry(&alloc->seqlock, seq));
spin_lock(&alloc->lock);
lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
list_block_space(alloc->freed.first_nr) < nr;
spin_unlock(&alloc->lock);
return lo;
}
@@ -1098,8 +1164,8 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
int scoutfs_alloc_foreach(struct super_block *sb,
scoutfs_alloc_foreach_cb_t cb, void *arg)
{
struct scoutfs_block_ref stale_refs[2] = {{0,}};
struct scoutfs_block_ref refs[2] = {{0,}};
struct scoutfs_btree_ref stale_refs[2] = {{0,}};
struct scoutfs_btree_ref refs[2] = {{0,}};
struct scoutfs_super_block *super = NULL;
struct scoutfs_srch_compact *sc;
struct scoutfs_log_trees lt;

View File

@@ -72,8 +72,7 @@
* transaction.
*/
struct scoutfs_alloc {
/* writers rarely modify list_head avail/freed. readers often check for _meta_alloc_low */
seqlock_t seqlock;
spinlock_t lock;
struct mutex mutex;
struct scoutfs_block *dirty_avail_bl;
struct scoutfs_block *dirty_freed_bl;

File diff suppressed because it is too large.

View File

@@ -13,16 +13,27 @@ struct scoutfs_block {
void *priv;
};
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
struct scoutfs_block **bl_ret);
__le32 scoutfs_block_calc_crc(struct scoutfs_block_header *hdr, u32 size);
bool scoutfs_block_valid_crc(struct scoutfs_block_header *hdr, u32 size);
bool scoutfs_block_valid_ref(struct super_block *sb,
struct scoutfs_block_header *hdr,
__le64 seq, __le64 blkno);
struct scoutfs_block *scoutfs_block_create(struct super_block *sb, u64 blkno);
struct scoutfs_block *scoutfs_block_read(struct super_block *sb, u64 blkno);
void scoutfs_block_invalidate(struct super_block *sb, struct scoutfs_block *bl);
bool scoutfs_block_consistent_ref(struct super_block *sb,
struct scoutfs_block *bl,
__le64 seq, __le64 blkno, u32 magic);
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
void scoutfs_block_writer_init(struct super_block *sb,
struct scoutfs_block_writer *wri);
int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, struct scoutfs_block_ref *ref,
u32 magic, struct scoutfs_block **bl_ret,
u64 dirty_blkno, u64 *ref_blkno);
void scoutfs_block_writer_mark_dirty(struct super_block *sb,
struct scoutfs_block_writer *wri,
struct scoutfs_block *bl);
bool scoutfs_block_writer_is_dirty(struct super_block *sb,
struct scoutfs_block *bl);
int scoutfs_block_writer_write(struct super_block *sb,
struct scoutfs_block_writer *wri);
void scoutfs_block_writer_forget_all(struct super_block *sb,

View File

@@ -80,7 +80,7 @@ enum btree_walk_flags {
BTW_NEXT = (1 << 0), /* return >= key */
BTW_PREV = (1 << 1), /* return <= key */
BTW_DIRTY = (1 << 2), /* cow stable blocks */
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref, requires dirty */
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref */
BTW_INSERT = (1 << 4), /* walking to insert, try splitting */
BTW_DELETE = (1 << 5), /* walking to delete, try joining */
};
@@ -619,36 +619,140 @@ static void move_items(struct scoutfs_btree_block *dst,
* This is used to lookup cached blocks, read blocks, cow blocks for
* dirtying, and allocate new blocks.
*
* If we read a stale block we return stale so the caller can retry with
* a newer root or return an error.
* Btree blocks don't have rigid cache consistency. We can be following
* block references into cached blocks that are now stale or can be
* following a stale root into blocks that have been overwritten. If we
* hit a block that looks stale we first invalidate the cache and retry,
* returning -ESTALE if it still looks wrong. The caller can retry the
* read from a more current root or decide that this is a persistent
* error.
*/
static int get_ref_block(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, int flags,
struct scoutfs_block_ref *ref,
struct scoutfs_btree_ref *ref,
struct scoutfs_block **bl_ret)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_btree_block *bt = NULL;
struct scoutfs_btree_block *new;
struct scoutfs_block *new_bl = NULL;
struct scoutfs_block *bl = NULL;
bool retried = false;
u64 blkno;
u64 seq;
int ret;
if (WARN_ON_ONCE((flags & BTW_ALLOC) && !(flags & BTW_DIRTY)))
return -EINVAL;
/* always get the current block, either to return or cow from */
if (ref && ref->blkno) {
retry:
if (ref->blkno == 0 && !(flags & BTW_ALLOC)) {
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (IS_ERR(bl)) {
trace_scoutfs_btree_read_error(sb, ref);
scoutfs_inc_counter(sb, btree_read_error);
ret = PTR_ERR(bl);
goto out;
}
bt = (void *)bl->data;
if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
SCOUTFS_BLOCK_MAGIC_BTREE) ||
scoutfs_trigger(sb, BTREE_STALE_READ)) {
scoutfs_inc_counter(sb, btree_stale_read);
scoutfs_block_invalidate(sb, bl);
scoutfs_block_put(sb, bl);
bl = NULL;
if (!retried) {
retried = true;
goto retry;
}
ret = -ESTALE;
goto out;
}
/*
* We need to create a new dirty copy of the block if
* the caller asked for it. If the block is already
* dirty then we can return it.
*/
if (!(flags & BTW_DIRTY) ||
scoutfs_block_writer_is_dirty(sb, bl)) {
ret = 0;
goto out;
}
} else if (!(flags & BTW_ALLOC)) {
ret = -ENOENT;
goto out;
}
if (flags & BTW_DIRTY)
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_BTREE,
bl_ret, 0, NULL);
else
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BTREE, bl_ret);
out:
if (ret < 0) {
if (ret == -ESTALE)
scoutfs_inc_counter(sb, btree_stale_read);
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
if (ret < 0)
goto out;
prandom_bytes(&seq, sizeof(seq));
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(ret);
ret = PTR_ERR(new_bl);
goto out;
}
new = (void *)new_bl->data;
/* free old stable blkno we're about to overwrite */
if (ref && ref->blkno) {
ret = scoutfs_free_meta(sb, alloc, wri,
le64_to_cpu(ref->blkno));
if (ret) {
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(ret);
scoutfs_block_put(sb, new_bl);
new_bl = NULL;
goto out;
}
}
scoutfs_block_writer_mark_dirty(sb, wri, new_bl);
trace_scoutfs_btree_dirty_block(sb, blkno, seq,
bt ? le64_to_cpu(bt->hdr.blkno) : 0,
bt ? le64_to_cpu(bt->hdr.seq) : 0);
if (bt) {
/* returning a cow of an existing block */
memcpy(new, bt, SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
} else {
/* returning a newly allocated block */
memset(new, 0, SCOUTFS_BLOCK_LG_SIZE);
new->hdr.fsid = super->hdr.fsid;
}
bl = new_bl;
bt = new;
bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
bt->hdr.blkno = cpu_to_le64(blkno);
bt->hdr.seq = cpu_to_le64(seq);
if (ref) {
ref->blkno = bt->hdr.blkno;
ref->seq = bt->hdr.seq;
}
ret = 0;
out:
if (ret) {
scoutfs_block_put(sb, bl);
bl = NULL;
}
*bl_ret = bl;
return ret;
}
@@ -662,7 +766,7 @@ static void create_parent_item(struct scoutfs_btree_block *parent,
{
struct scoutfs_avl_node *par;
int cmp;
struct scoutfs_block_ref ref = {
struct scoutfs_btree_ref ref = {
.blkno = child->hdr.blkno,
.seq = child->hdr.seq,
};
@@ -680,7 +784,7 @@ static void update_parent_item(struct scoutfs_btree_block *parent,
struct scoutfs_btree_item *par_item,
struct scoutfs_btree_block *child)
{
struct scoutfs_block_ref *ref = item_val(parent, par_item);
struct scoutfs_btree_ref *ref = item_val(parent, par_item);
par_item->key = *item_key(last_item(child));
ref->blkno = child->hdr.blkno;
@@ -728,13 +832,12 @@ static int try_split(struct super_block *sb,
struct scoutfs_block *par_bl = NULL;
struct scoutfs_btree_block *left;
struct scoutfs_key max_key;
struct scoutfs_block_ref zeros;
int ret;
int err;
/* parents need to leave room for child references */
if (right->level)
val_len = sizeof(struct scoutfs_block_ref);
val_len = sizeof(struct scoutfs_btree_ref);
/* don't need to split if there's enough space for the item */
if (mid_free_item_room(right, val_len))
@@ -746,8 +849,7 @@ static int try_split(struct super_block *sb,
scoutfs_inc_counter(sb, btree_split);
/* alloc split neighbour first to avoid unwinding tree growth */
memset(&zeros, 0, sizeof(zeros));
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &left_bl);
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &left_bl);
if (ret)
return ret;
left = left_bl->data;
@@ -755,8 +857,7 @@ static int try_split(struct super_block *sb,
init_btree_block(left, right->level);
if (!parent) {
memset(&zeros, 0, sizeof(zeros));
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &par_bl);
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &par_bl);
if (ret) {
err = scoutfs_free_meta(sb, alloc, wri,
le64_to_cpu(left->hdr.blkno));
@@ -804,7 +905,7 @@ static int try_join(struct super_block *sb,
struct scoutfs_btree_item *sib_par_item;
struct scoutfs_btree_block *sib;
struct scoutfs_block *sib_bl;
struct scoutfs_block_ref *ref;
struct scoutfs_btree_ref *ref;
unsigned int sib_tot;
bool move_right;
int to_move;
@@ -1093,7 +1194,7 @@ static int btree_walk(struct super_block *sb,
struct scoutfs_btree_item *prev;
struct scoutfs_avl_node *next_node;
struct scoutfs_avl_node *node;
struct scoutfs_block_ref *ref;
struct scoutfs_btree_ref *ref;
unsigned int level;
unsigned int nr;
int ret;
@@ -1124,7 +1225,8 @@ restart:
if (!(flags & BTW_INSERT)) {
ret = -ENOENT;
} else {
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &root->ref, &bl);
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC,
&root->ref, &bl);
if (ret == 0) {
bt = bl->data;
init_btree_block(bt, 0);

View File

@@ -31,14 +31,16 @@
#include "net.h"
#include "endian_swap.h"
#include "quorum.h"
#include "omap.h"
/*
* The client is responsible for maintaining a connection to the server.
* This includes managing quorum elections that determine which client
* should run the server that all the clients connect to.
*/
#define CLIENT_CONNECT_DELAY_MS (MSEC_PER_SEC / 10)
#define CLIENT_CONNECT_TIMEOUT_MS (1 * MSEC_PER_SEC)
#define CLIENT_QUORUM_TIMEOUT_MS (5 * MSEC_PER_SEC)
struct client_info {
struct super_block *sb;
@@ -50,6 +52,7 @@ struct client_info {
struct delayed_work connect_dwork;
u64 server_term;
u64 greeting_umb;
bool sending_farewell;
int farewell_error;
@@ -118,14 +121,16 @@ int scoutfs_client_get_roots(struct super_block *sb,
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
__le64 leseq;
__le64 before = cpu_to_le64p(seq);
__le64 after;
int ret;
ret = scoutfs_net_sync_request(sb, client->conn,
SCOUTFS_NET_CMD_ADVANCE_SEQ,
NULL, 0, &leseq, sizeof(leseq));
&before, sizeof(before),
&after, sizeof(after));
if (ret == 0)
*seq = le64_to_cpu(leseq);
*seq = le64_to_cpu(after);
return ret;
}
@@ -151,7 +156,7 @@ static int client_lock_response(struct super_block *sb,
void *resp, unsigned int resp_len,
int error, void *data)
{
if (resp_len != sizeof(struct scoutfs_net_lock))
if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
return -EINVAL;
/* XXX error? */
@@ -216,39 +221,6 @@ int scoutfs_client_srch_commit_compact(struct super_block *sb,
res, sizeof(*res), NULL, 0);
}
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map *map)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
return scoutfs_net_response(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
id, 0, map, sizeof(*map));
}
/* The client is receiving an omap request from the server */
static int client_open_ino_map(struct super_block *sb, struct scoutfs_net_connection *conn,
u8 cmd, u64 id, void *arg, u16 arg_len)
{
if (arg_len != sizeof(struct scoutfs_open_ino_map_args))
return -EINVAL;
return scoutfs_omap_client_handle_request(sb, id, arg);
}
/* The client is sending an omap request to the server */
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
struct scoutfs_open_ino_map *map)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
struct scoutfs_open_ino_map_args args = {
.group_nr = cpu_to_le64(group_nr),
.req_id = 0,
};
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
&args, sizeof(args), map, sizeof(*map));
}
/* The client is receiving an invalidation request from the server */
static int client_lock(struct super_block *sb,
struct scoutfs_net_connection *conn, u8 cmd, u64 id,
@@ -310,10 +282,10 @@ static int client_greeting(struct super_block *sb,
goto out;
}
if (gr->version != super->version) {
if (gr->format_hash != super->format_hash) {
scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
le64_to_cpu(gr->version),
le64_to_cpu(super->version));
le64_to_cpu(gr->format_hash),
le64_to_cpu(super->format_hash));
ret = -EINVAL;
goto out;
}
@@ -322,30 +294,52 @@ static int client_greeting(struct super_block *sb,
scoutfs_net_client_greeting(sb, conn, new_server);
client->server_term = le64_to_cpu(gr->server_term);
client->greeting_umb = le64_to_cpu(gr->unmount_barrier);
ret = 0;
out:
return ret;
}
/*
* The client is deciding if it needs to keep trying to reconnect to
* have its farewell request processed. The server removes our mounted
* client item last so that if we don't see it we know the server has
* processed our farewell and we don't need to reconnect, we can unmount
* safely.
* This work is responsible for maintaining a connection from the client
* to the server. It's queued on mount and disconnect and we requeue
* the work if the work fails and we're not shutting down.
*
* This is peeking at btree blocks that the server could be actively
* freeing with cow updates so it can see stale blocks, we just return
* the error and we'll retry eventually as the connection times out.
* In the typical case a mount reads the super blocks and finds the
* address of the currently running server and connects to it.
* Non-voting clients who can't connect will keep trying, alternating
* between reading the address and getting connect timeouts.
*
* Voting mounts will try to elect a leader if they can't connect to the
* server. When a quorum can't connect and is able to elect a leader
* then a new server is started. The new server will write its address
* in the super and everyone will be able to connect.
*
* There's a tricky bit of coordination required to safely unmount.
* Clients need to tell the server that they won't be coming back with a
* farewell request. Once a client receives its farewell response it
* can exit. But a majority of clients need to stick around to elect a
* server to process all their farewell requests. This is coordinated
* by having the greeting tell the server that a client is a voter. The
* server then holds on to farewell requests from voters until only
* requests from the final quorum remain. These farewell responses are
* only sent after updating an unmount barrier in the super to indicate
* to the final quorum that they can safely exit without having received
* a farewell response over the network.
*/
static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
static void scoutfs_client_connect_worker(struct work_struct *work)
{
struct scoutfs_key key = {
.sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
.skmc_rid = cpu_to_le64(rid),
};
struct scoutfs_super_block *super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct client_info *client = container_of(work, struct client_info,
connect_dwork.work);
struct super_block *sb = client->sb;
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = NULL;
struct mount_options *opts = &sbi->opts;
const bool am_voter = opts->server_addr.sin_addr.s_addr != 0;
struct scoutfs_net_greeting greet;
struct sockaddr_in sin;
ktime_t timeout_abs;
u64 elected_term;
int ret;
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -358,77 +352,57 @@ static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
if (ret)
goto out;
ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key, &iref);
if (ret == 0) {
scoutfs_btree_put_iref(&iref);
ret = 1;
}
if (ret == -ENOENT)
ret = 0;
kfree(super);
out:
return ret;
}
/*
* This work is responsible for maintaining a connection from the client
* to the server. It's queued on mount and disconnect and we requeue
* the work if the work fails and we're not shutting down.
*
* We ask quorum for an address to try and connect to. If there isn't
* one, or it fails, we back off a bit before trying again.
*
* There's a tricky bit of coordination required to safely unmount.
* Clients need to tell the server that they won't be coming back with a
* farewell request. Once the server processes a farewell request from
* the client it can forget the client. If the connection is broken
* before the client gets the farewell response it doesn't want to
* reconnect to send it again. Instead the client can read the metadata
* device to check for the lack of an item which indicates that the
* server has processed its farewell.
*/
static void scoutfs_client_connect_worker(struct work_struct *work)
{
struct client_info *client = container_of(work, struct client_info,
connect_dwork.work);
struct super_block *sb = client->sb;
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct mount_options *opts = &sbi->opts;
const bool am_quorum = opts->quorum_slot_nr >= 0;
struct scoutfs_net_greeting greet;
struct sockaddr_in sin;
int ret;
/* can unmount once server farewell handling removes our item */
if (client->sending_farewell &&
lookup_mounted_client_item(sb, sbi->rid) == 0) {
/* can safely unmount if we see that server processed our farewell */
if (am_voter && client->sending_farewell &&
(le64_to_cpu(super->unmount_barrier) > client->greeting_umb)) {
client->farewell_error = 0;
complete(&client->farewell_comp);
ret = 0;
goto out;
}
ret = scoutfs_quorum_server_sin(sb, &sin);
if (ret < 0)
goto out;
/* try to connect to the super's server address */
scoutfs_addr_to_sin(&sin, &super->server_addr);
if (sin.sin_addr.s_addr != 0 && sin.sin_port != 0)
ret = scoutfs_net_connect(sb, client->conn, &sin,
CLIENT_CONNECT_TIMEOUT_MS);
else
ret = -ENOTCONN;
ret = scoutfs_net_connect(sb, client->conn, &sin,
CLIENT_CONNECT_TIMEOUT_MS);
if (ret < 0)
/* voters try to elect a leader if they couldn't connect */
if (ret < 0) {
/* non-voters will keep retrying */
if (!am_voter)
goto out;
/* make sure local server isn't writing super during votes */
scoutfs_server_stop(sb);
timeout_abs = ktime_add_ms(ktime_get(),
CLIENT_QUORUM_TIMEOUT_MS);
ret = scoutfs_quorum_election(sb, timeout_abs,
le64_to_cpu(super->quorum_server_term),
&elected_term);
/* start the server if we were asked to */
if (elected_term > 0)
ret = scoutfs_server_start(sb, &opts->server_addr,
elected_term);
ret = -ENOTCONN;
goto out;
}
/* send a greeting to verify endpoints of each connection */
greet.fsid = super->hdr.fsid;
greet.version = super->version;
greet.format_hash = super->format_hash;
greet.server_term = cpu_to_le64(client->server_term);
greet.unmount_barrier = cpu_to_le64(client->greeting_umb);
greet.rid = cpu_to_le64(sbi->rid);
greet.flags = 0;
if (client->sending_farewell)
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_FAREWELL);
if (am_quorum)
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_QUORUM);
if (am_voter)
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_VOTER);
ret = scoutfs_net_submit_request(sb, client->conn,
SCOUTFS_NET_CMD_GREETING,
@@ -437,6 +411,7 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
if (ret)
scoutfs_net_shutdown(sb, client->conn);
out:
kfree(super);
/* always have a small delay before retrying to avoid storms */
if (ret && !atomic_read(&client->shutting_down))
@@ -447,7 +422,6 @@ out:
static scoutfs_net_request_t client_req_funcs[] = {
[SCOUTFS_NET_CMD_LOCK] = client_lock,
[SCOUTFS_NET_CMD_LOCK_RECOVER] = client_lock_recover,
[SCOUTFS_NET_CMD_OPEN_INO_MAP] = client_open_ino_map,
};
/*
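Condensing the unmount handshake described in the comments above into one predicate: a voting client that has sent its farewell can tear down once the server advances the super's unmount barrier past the value the client saw in its greeting. A minimal sketch using the fields from the connect-worker hunk (the helper name is hypothetical):

```c
/* Sketch: condensed from the connect worker above. True once the
 * server has processed this voting client's farewell, i.e. it has
 * advanced unmount_barrier past the value from our greeting. */
static bool farewell_processed(struct client_info *client,
			       struct scoutfs_super_block *super,
			       bool am_voter)
{
	return am_voter && client->sending_farewell &&
	       le64_to_cpu(super->unmount_barrier) > client->greeting_umb;
}
```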

View File

@@ -22,10 +22,6 @@ int scoutfs_client_srch_get_compact(struct super_block *sb,
struct scoutfs_srch_compact *sc);
int scoutfs_client_srch_commit_compact(struct super_block *sb,
struct scoutfs_srch_compact *res);
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map *map);
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
struct scoutfs_open_ino_map *map);
int scoutfs_client_setup(struct super_block *sb);
void scoutfs_client_destroy(struct super_block *sb);

kmod/src/count.h (new file, 315 lines)
View File

@@ -0,0 +1,315 @@
#ifndef _SCOUTFS_COUNT_H_
#define _SCOUTFS_COUNT_H_
/*
* Our estimate of the space consumed while dirtying items is based on
* the number of items and the size of their values.
*
* The estimate is still a read-only input to entering the transaction.
* We'd like to use it as a clean rhs arg to hold_trans. We define SIC_
* functions which return the count struct. This lets us have a single
* arg and avoid bugs in initializing and passing in struct pointers
* from callers. The internal __count functions are used to compose an
* estimate out of the sets of items it manipulates. We program in much
* clearer C instead of in the preprocessor.
*
* Compilers are able to collapse the inlines into constants for the
* constant estimates.
*/
struct scoutfs_item_count {
signed items;
signed vals;
};
/* The caller knows exactly what they're doing. */
static inline const struct scoutfs_item_count SIC_EXACT(signed items,
signed vals)
{
struct scoutfs_item_count cnt = {
.items = items,
.vals = vals,
};
return cnt;
}
/*
* Allocating an inode creates a new set of indexed items.
*/
static inline void __count_alloc_inode(struct scoutfs_item_count *cnt)
{
const int nr_indices = SCOUTFS_INODE_INDEX_NR;
cnt->items += 1 + nr_indices;
cnt->vals += sizeof(struct scoutfs_inode);
}
/*
* Dirtying an inode dirties the inode item and can delete and create
* the full set of indexed items.
*/
static inline void __count_dirty_inode(struct scoutfs_item_count *cnt)
{
const int nr_indices = 2 * SCOUTFS_INODE_INDEX_NR;
cnt->items += 1 + nr_indices;
cnt->vals += sizeof(struct scoutfs_inode);
}
static inline const struct scoutfs_item_count SIC_ALLOC_INODE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_alloc_inode(&cnt);
return cnt;
}
static inline const struct scoutfs_item_count SIC_DIRTY_INODE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_dirty_inode(&cnt);
return cnt;
}
/*
* Directory entries are stored in three items.
*/
static inline void __count_dirents(struct scoutfs_item_count *cnt,
unsigned name_len)
{
cnt->items += 3;
cnt->vals += 3 * offsetof(struct scoutfs_dirent, name[name_len]);
}
static inline void __count_sym_target(struct scoutfs_item_count *cnt,
unsigned size)
{
unsigned nr = DIV_ROUND_UP(size, SCOUTFS_MAX_VAL_SIZE);
cnt->items += nr;
cnt->vals += size;
}
static inline void __count_orphan(struct scoutfs_item_count *cnt)
{
cnt->items += 1;
}
static inline void __count_mknod(struct scoutfs_item_count *cnt,
unsigned name_len)
{
__count_alloc_inode(cnt);
__count_dirents(cnt, name_len);
__count_dirty_inode(cnt);
}
static inline const struct scoutfs_item_count SIC_MKNOD(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_mknod(&cnt, name_len);
return cnt;
}
/*
* Dropping the inode deletes all its items. Potentially enormous numbers
* of items (data mapping, xattrs) are deleted in their own transactions.
*/
static inline const struct scoutfs_item_count SIC_DROP_INODE(int mode,
u64 size)
{
struct scoutfs_item_count cnt = {0,};
if (S_ISLNK(mode))
__count_sym_target(&cnt, size);
__count_dirty_inode(&cnt);
__count_orphan(&cnt);
cnt.vals = 0;
return cnt;
}
static inline const struct scoutfs_item_count SIC_LINK(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_dirents(&cnt, name_len);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
return cnt;
}
/*
* Unlink can add orphan items.
*/
static inline const struct scoutfs_item_count SIC_UNLINK(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_dirents(&cnt, name_len);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_orphan(&cnt);
return cnt;
}
static inline const struct scoutfs_item_count SIC_SYMLINK(unsigned name_len,
unsigned size)
{
struct scoutfs_item_count cnt = {0,};
__count_mknod(&cnt, name_len);
__count_sym_target(&cnt, size);
return cnt;
}
/*
* This assumes the worst case of a rename between directories that
* unlinks an existing target. That'll be worse than the common case
* by a few hundred bytes.
*/
static inline const struct scoutfs_item_count SIC_RENAME(unsigned old_len,
unsigned new_len)
{
struct scoutfs_item_count cnt = {0,};
/* dirty dirs and inodes */
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
/* unlink old and new, link new */
__count_dirents(&cnt, old_len);
__count_dirents(&cnt, new_len);
__count_dirents(&cnt, new_len);
/* orphan the existing target */
__count_orphan(&cnt);
return cnt;
}
/*
* Creating an xattr results in a dirty set of items with values that
* store the xattr header, name, and value. There's always at least one
* item with the header and name. Any previously existing items are
* deleted which dirties their key but removes their value. The two
* sets of items are indexed by different ids so their items don't
* overlap.
*/
static inline const struct scoutfs_item_count SIC_XATTR_SET(unsigned old_parts,
bool creating,
unsigned name_len,
unsigned size)
{
struct scoutfs_item_count cnt = {0,};
unsigned int new_parts;
__count_dirty_inode(&cnt);
if (old_parts)
cnt.items += old_parts;
if (creating) {
new_parts = SCOUTFS_XATTR_NR_PARTS(name_len, size);
cnt.items += new_parts;
cnt.vals += sizeof(struct scoutfs_xattr) + name_len + size;
}
return cnt;
}
/*
* write_begin can have to allocate all the blocks in the page and can
* have to add a big allocation from the server to do so:
* - merge added free extents from the server
* - remove a free extent per block
* - remove an offline extent for every other block
* - add a file extent per block
*/
static inline const struct scoutfs_item_count SIC_WRITE_BEGIN(void)
{
struct scoutfs_item_count cnt = {0,};
unsigned nr_free = (1 + SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
unsigned nr_file = (DIV_ROUND_UP(SCOUTFS_BLOCK_SM_PER_PAGE, 2) +
SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
__count_dirty_inode(&cnt);
cnt.items += nr_free + nr_file;
cnt.vals += nr_file;
return cnt;
}
/*
* Truncating an extent can:
* - delete existing file extent,
* - create two surrounding file extents,
* - add an offline file extent,
* - delete two existing free extents
* - create a merged free extent
*/
static inline const struct scoutfs_item_count
SIC_TRUNC_EXTENT(struct inode *inode)
{
struct scoutfs_item_count cnt = {0,};
unsigned int nr_file = 1 + 2 + 1;
unsigned int nr_free = (2 + 1) * 2;
if (inode)
__count_dirty_inode(&cnt);
cnt.items += nr_file + nr_free;
cnt.vals += nr_file;
return cnt;
}
/*
* Fallocating an extent can, at most:
* - allocate from the server: delete two free and insert merged
* - free an allocated extent: delete one and create two split
* - remove an unallocated file extent: delete one and create two split
* - add a fallocated file extent: delete two and insert one merged
*/
static inline const struct scoutfs_item_count SIC_FALLOCATE_ONE(void)
{
struct scoutfs_item_count cnt = {0,};
unsigned int nr_free = ((1 + 2) * 2) * 2;
unsigned int nr_file = (1 + 2) * 2;
__count_dirty_inode(&cnt);
cnt.items += nr_free + nr_file;
cnt.vals += nr_file;
return cnt;
}
/*
* ioc_setattr_more can dirty the inode and add a single offline extent.
*/
static inline const struct scoutfs_item_count SIC_SETATTR_MORE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_dirty_inode(&cnt);
cnt.items++;
return cnt;
}
#endif
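To make the calling convention from the header comment concrete, here is a minimal usage sketch matching the call sites in the data.c and dir.c hunks below, where the count struct is passed by value as the final argument to the transaction entry points (the wrapper function here is hypothetical):

```c
/* Sketch: build the estimate at the call site and pass it by value.
 * scoutfs_hold_trans(sb, cnt) matches the call sites later in this
 * diff; for fixed name lengths the SIC_ inline folds to a constant. */
static int example_hold_for_mknod(struct super_block *sb,
				  struct dentry *dentry)
{
	const struct scoutfs_item_count cnt = SIC_MKNOD(dentry->d_name.len);

	return scoutfs_hold_trans(sb, cnt);
}
```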

View File

@@ -20,21 +20,17 @@
EXPAND_COUNTER(alloc_list_freed_hi) \
EXPAND_COUNTER(alloc_move) \
EXPAND_COUNTER(alloc_moved_extent) \
EXPAND_COUNTER(alloc_stale_list_block) \
EXPAND_COUNTER(block_cache_access_update) \
EXPAND_COUNTER(alloc_stale_cached_list_block) \
EXPAND_COUNTER(block_cache_access) \
EXPAND_COUNTER(block_cache_alloc_failure) \
EXPAND_COUNTER(block_cache_alloc_page_order) \
EXPAND_COUNTER(block_cache_alloc_virt) \
EXPAND_COUNTER(block_cache_end_io_error) \
EXPAND_COUNTER(block_cache_forget) \
EXPAND_COUNTER(block_cache_free) \
EXPAND_COUNTER(block_cache_free_work) \
EXPAND_COUNTER(block_cache_remove_stale) \
EXPAND_COUNTER(block_cache_invalidate) \
EXPAND_COUNTER(block_cache_lru_move) \
EXPAND_COUNTER(block_cache_shrink) \
EXPAND_COUNTER(block_cache_shrink_next) \
EXPAND_COUNTER(block_cache_shrink_recent) \
EXPAND_COUNTER(block_cache_shrink_remove) \
EXPAND_COUNTER(block_cache_shrink_restart) \
EXPAND_COUNTER(btree_compact_values) \
EXPAND_COUNTER(btree_compact_values_enomem) \
EXPAND_COUNTER(btree_delete) \
@@ -46,6 +42,7 @@
EXPAND_COUNTER(btree_lookup) \
EXPAND_COUNTER(btree_next) \
EXPAND_COUNTER(btree_prev) \
EXPAND_COUNTER(btree_read_error) \
EXPAND_COUNTER(btree_split) \
EXPAND_COUNTER(btree_stale_read) \
EXPAND_COUNTER(btree_update) \
@@ -61,8 +58,6 @@
EXPAND_COUNTER(corrupt_symlink_inode_size) \
EXPAND_COUNTER(corrupt_symlink_missing_item) \
EXPAND_COUNTER(corrupt_symlink_not_null_term) \
EXPAND_COUNTER(data_fallocate_enobufs_retry) \
EXPAND_COUNTER(data_write_begin_enobufs_retry) \
EXPAND_COUNTER(dentry_revalidate_error) \
EXPAND_COUNTER(dentry_revalidate_invalid) \
EXPAND_COUNTER(dentry_revalidate_locked) \
@@ -76,7 +71,6 @@
EXPAND_COUNTER(ext_op_remove) \
EXPAND_COUNTER(forest_bloom_fail) \
EXPAND_COUNTER(forest_bloom_pass) \
EXPAND_COUNTER(forest_bloom_stale) \
EXPAND_COUNTER(forest_read_items) \
EXPAND_COUNTER(forest_roots_next_hint) \
EXPAND_COUNTER(forest_set_bloom_bits) \
@@ -143,21 +137,18 @@
EXPAND_COUNTER(net_recv_invalid_message) \
EXPAND_COUNTER(net_recv_messages) \
EXPAND_COUNTER(net_unknown_request) \
EXPAND_COUNTER(quorum_elected) \
EXPAND_COUNTER(quorum_fence_error) \
EXPAND_COUNTER(quorum_fence_leader) \
EXPAND_COUNTER(quorum_cycle) \
EXPAND_COUNTER(quorum_elected_leader) \
EXPAND_COUNTER(quorum_election_timeout) \
EXPAND_COUNTER(quorum_failure) \
EXPAND_COUNTER(quorum_read_block) \
EXPAND_COUNTER(quorum_read_block_error) \
EXPAND_COUNTER(quorum_read_invalid_block) \
EXPAND_COUNTER(quorum_recv_error) \
EXPAND_COUNTER(quorum_recv_heartbeat) \
EXPAND_COUNTER(quorum_recv_invalid) \
EXPAND_COUNTER(quorum_recv_resignation) \
EXPAND_COUNTER(quorum_recv_vote) \
EXPAND_COUNTER(quorum_send_heartbeat) \
EXPAND_COUNTER(quorum_send_resignation) \
EXPAND_COUNTER(quorum_send_request) \
EXPAND_COUNTER(quorum_send_vote) \
EXPAND_COUNTER(quorum_server_shutdown) \
EXPAND_COUNTER(quorum_term_follower) \
EXPAND_COUNTER(quorum_saw_super_leader) \
EXPAND_COUNTER(quorum_timedout) \
EXPAND_COUNTER(quorum_write_block) \
EXPAND_COUNTER(quorum_write_block_error) \
EXPAND_COUNTER(quorum_fenced) \
EXPAND_COUNTER(server_commit_hold) \
EXPAND_COUNTER(server_commit_queue) \
EXPAND_COUNTER(server_commit_worker) \
@@ -167,6 +158,7 @@
EXPAND_COUNTER(srch_compact_flush) \
EXPAND_COUNTER(srch_compact_log_page) \
EXPAND_COUNTER(srch_compact_removed_entry) \
EXPAND_COUNTER(srch_inconsistent_ref) \
EXPAND_COUNTER(srch_rotate_log) \
EXPAND_COUNTER(srch_search_log) \
EXPAND_COUNTER(srch_search_log_block) \

View File

@@ -37,6 +37,7 @@
#include "lock.h"
#include "file.h"
#include "msg.h"
#include "count.h"
#include "ext.h"
#include "util.h"
@@ -290,6 +291,7 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
u64 ino, u64 iblock, u64 last, bool offline,
struct scoutfs_lock *lock)
{
struct scoutfs_item_count cnt = SIC_TRUNC_EXTENT(inode);
struct scoutfs_inode_info *si = NULL;
LIST_HEAD(ind_locks);
s64 ret = 0;
@@ -313,9 +315,9 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
while (iblock <= last) {
if (inode)
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
true);
true, cnt);
else
ret = scoutfs_hold_trans(sb);
ret = scoutfs_hold_trans(sb, cnt);
if (ret)
break;
@@ -751,13 +753,13 @@ static int scoutfs_write_begin(struct file *file,
goto out;
}
retry:
do {
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &wbd->ind_locks, inode,
true) ?:
scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks,
ind_seq);
ind_seq,
SIC_WRITE_BEGIN());
} while (ret > 0);
if (ret < 0)
goto out;
@@ -766,22 +768,17 @@ retry:
flags |= AOP_FLAG_NOFS;
/* generic write_end updates i_size and calls dirty_inode */
ret = scoutfs_dirty_inode_item(inode, wbd->lock) ?:
block_write_begin(mapping, pos, len, flags, pagep,
scoutfs_get_block_write);
if (ret < 0) {
ret = scoutfs_dirty_inode_item(inode, wbd->lock);
if (ret == 0)
ret = block_write_begin(mapping, pos, len, flags, pagep,
scoutfs_get_block_write);
if (ret)
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &wbd->ind_locks);
if (ret == -ENOBUFS) {
/* Retry with a new transaction. */
scoutfs_inc_counter(sb, data_write_begin_enobufs_retry);
goto retry;
}
}
out:
if (ret < 0)
if (ret) {
scoutfs_inode_index_unlock(sb, &wbd->ind_locks);
kfree(wbd);
}
return ret;
}
@@ -1010,7 +1007,8 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
while(iblock <= last) {
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_FALLOCATE_ONE());
if (ret)
goto out;
@@ -1028,12 +1026,6 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
/* txn couldn't meet the request. Let's try with a new txn */
if (ret == -ENOBUFS) {
scoutfs_inc_counter(sb, data_fallocate_enobufs_retry);
continue;
}
if (ret <= 0)
goto out;
@@ -1086,7 +1078,8 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
}
/* we're updating meta_seq with offline block count */
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_SETATTR_MORE());
if (ret < 0)
goto out;
@@ -1135,8 +1128,7 @@ static void truncate_inode_pages_extent(struct inode *inode, u64 start, u64 len)
*/
#define MOVE_DATA_EXTENTS_PER_HOLD 16
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
u64 byte_len, struct inode *to, u64 to_off, bool is_stage,
u64 data_version)
u64 byte_len, struct inode *to, u64 to_off)
{
struct scoutfs_inode_info *from_si = SCOUTFS_I(from);
struct scoutfs_inode_info *to_si = SCOUTFS_I(to);
@@ -1146,7 +1138,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
struct data_ext_args from_args;
struct data_ext_args to_args;
struct scoutfs_extent ext;
struct timespec cur_time;
LIST_HEAD(locks);
bool done = false;
loff_t from_size;
@@ -1182,11 +1173,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
goto out;
}
if (is_stage && (data_version != SCOUTFS_I(to)->data_version)) {
ret = -ESTALE;
goto out;
}
from_iblock = from_off >> SCOUTFS_BLOCK_SM_SHIFT;
count = (byte_len + SCOUTFS_BLOCK_SM_MASK) >> SCOUTFS_BLOCK_SM_SHIFT;
to_iblock = to_off >> SCOUTFS_BLOCK_SM_SHIFT;
@@ -1209,7 +1195,7 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
/* can't stage once data_version changes */
scoutfs_inode_get_onoff(from, &junk, &from_offline);
scoutfs_inode_get_onoff(to, &junk, &to_offline);
if (from_offline || (to_offline && !is_stage)) {
if (from_offline || to_offline) {
ret = -ENODATA;
goto out;
}
@@ -1238,7 +1224,8 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
ret = scoutfs_inode_index_start(sb, &seq) ?:
scoutfs_inode_index_prepare(sb, &locks, from, true) ?:
scoutfs_inode_index_prepare(sb, &locks, to, true) ?:
scoutfs_inode_index_try_lock_hold(sb, &locks, seq);
scoutfs_inode_index_try_lock_hold(sb, &locks, seq,
SIC_EXACT(1, 1));
if (ret > 0)
continue;
if (ret < 0)
@@ -1253,8 +1240,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
/* arbitrarily limit the number of extents per trans hold */
for (i = 0; i < MOVE_DATA_EXTENTS_PER_HOLD; i++) {
struct scoutfs_extent off_ext;
/* find the next extent to move */
ret = scoutfs_ext_next(sb, &data_ext_ops, &from_args,
from_iblock, 1, &ext);
@@ -1283,27 +1268,10 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
to_start = to_iblock + (from_start - from_iblock);
if (is_stage) {
ret = scoutfs_ext_next(sb, &data_ext_ops, &to_args,
to_start, 1, &off_ext);
if (ret)
break;
if (!scoutfs_ext_inside(to_start, len, &off_ext) ||
!(off_ext.flags & SEF_OFFLINE)) {
ret = -EINVAL;
break;
}
ret = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
} else {
/* insert the new, fails if it overlaps */
ret = scoutfs_ext_insert(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
}
/* insert the new, fails if it overlaps */
ret = scoutfs_ext_insert(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
if (ret < 0)
break;
@@ -1311,18 +1279,10 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
ret = scoutfs_ext_set(sb, &data_ext_ops, &from_args,
from_start, len, 0, 0);
if (ret < 0) {
if (is_stage) {
/* re-mark dest range as offline */
WARN_ON_ONCE(!(off_ext.flags & SEF_OFFLINE));
err = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
to_start, len,
0, off_ext.flags);
} else {
/* remove inserted new on err */
err = scoutfs_ext_remove(sb, &data_ext_ops,
&to_args, to_start,
len);
}
/* remove inserted new on err */
err = scoutfs_ext_remove(sb, &data_ext_ops,
&to_args, to_start,
len);
BUG_ON(err); /* XXX inconsistent */
break;
}
@@ -1350,15 +1310,12 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
up_write(&from_si->extent_sem);
up_write(&to_si->extent_sem);
cur_time = CURRENT_TIME;
if (!is_stage) {
to->i_ctime = to->i_mtime = cur_time;
scoutfs_inode_inc_data_version(to);
scoutfs_inode_set_data_seq(to);
}
from->i_ctime = from->i_mtime = cur_time;
from->i_ctime = from->i_mtime =
to->i_ctime = to->i_mtime = CURRENT_TIME;
scoutfs_inode_inc_data_version(from);
scoutfs_inode_inc_data_version(to);
scoutfs_inode_set_data_seq(from);
scoutfs_inode_set_data_seq(to);
scoutfs_update_inode_item(from, from_lock, &locks);
scoutfs_update_inode_item(to, to_lock, &locks);

View File

@@ -59,8 +59,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
struct scoutfs_lock *lock);
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
u64 data_version);
u64 byte_len, struct inode *to, u64 to_off);
int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
u8 sef, u8 op, struct scoutfs_data_wait *ow,

View File

@@ -30,7 +30,6 @@
#include "item.h"
#include "lock.h"
#include "hash.h"
#include "omap.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -464,18 +463,7 @@ out:
else
inode = scoutfs_iget(sb, ino);
/*
* We can't splice dir aliases into the dcache. dir entries
* might have changed on other nodes so our dcache could still
* contain them, rather than having been moved in rename. For
* dirs, we use d_materialize_unique to remove any existing
* aliases which must be stale. Our inode numbers aren't reused
* so inodes pointed to by entries can't change types.
*/
if (!IS_ERR_OR_NULL(inode) && S_ISDIR(inode->i_mode))
return d_materialise_unique(dentry, inode);
else
return d_splice_alias(inode, dentry);
return d_splice_alias(inode, dentry);
}
/*
@@ -667,6 +655,7 @@ static int del_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
*/
static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev,
const struct scoutfs_item_count cnt,
struct scoutfs_lock **dir_lock,
struct scoutfs_lock **inode_lock,
struct list_head *ind_locks)
@@ -705,7 +694,7 @@ retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, ind_locks, dir, true) ?:
scoutfs_inode_index_prepare_ino(sb, ind_locks, ino, mode) ?:
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq, cnt);
if (ret > 0)
goto retry;
if (ret)
@@ -752,6 +741,7 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);
inode = lock_hold_create(dir, dentry, mode, rdev,
SIC_MKNOD(dentry->d_name.len),
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -814,7 +804,6 @@ static int scoutfs_link(struct dentry *old_dentry,
struct scoutfs_lock *dir_lock;
struct scoutfs_lock *inode_lock = NULL;
LIST_HEAD(ind_locks);
bool del_orphan;
u64 dir_size;
u64 ind_seq;
u64 hash;
@@ -843,13 +832,12 @@ static int scoutfs_link(struct dentry *old_dentry,
goto out_unlock;
dir_size = i_size_read(dir) + dentry->d_name.len;
del_orphan = (inode->i_nlink == 0);
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_LINK(dentry->d_name.len));
if (ret > 0)
goto retry;
if (ret)
@@ -859,12 +847,6 @@ retry:
if (ret)
goto out;
if (del_orphan) {
ret = scoutfs_orphan_dirty(sb, scoutfs_ino(inode));
if (ret)
goto out;
}
pos = SCOUTFS_I(dir)->next_readdir_pos++;
ret = add_entry_items(sb, scoutfs_ino(dir), hash, pos,
@@ -880,11 +862,6 @@ retry:
inode->i_ctime = dir->i_mtime;
inc_nlink(inode);
if (del_orphan) {
ret = scoutfs_orphan_delete(sb, scoutfs_ino(inode));
WARN_ON_ONCE(ret);
}
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
@@ -941,7 +918,8 @@ retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_UNLINK(dentry->d_name.len));
if (ret > 0)
goto retry;
if (ret)
@@ -1176,6 +1154,7 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
return ret;
inode = lock_hold_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0,
SIC_SYMLINK(dentry->d_name.len, name_len),
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -1607,7 +1586,9 @@ retry:
scoutfs_inode_index_prepare(sb, &ind_locks, new_dir, false)) ?:
(new_inode == NULL ? 0 :
scoutfs_inode_index_prepare(sb, &ind_locks, new_inode, false)) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_RENAME(old_dentry->d_name.len,
new_dentry->d_name.len));
if (ret > 0)
goto retry;
if (ret)
@@ -1775,42 +1756,6 @@ static int scoutfs_dir_open(struct inode *inode, struct file *file)
}
#endif
static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
struct scoutfs_lock *dir_lock = NULL;
struct scoutfs_lock *inode_lock = NULL;
LIST_HEAD(ind_locks);
int ret;
if (dentry->d_name.len > SCOUTFS_NAME_LEN)
return -ENAMETOOLONG;
inode = lock_hold_create(dir, dentry, mode, 0,
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
insert_inode_hash(inode);
d_tmpfile(dentry, inode);
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
scoutfs_inode_index_unlock(sb, &ind_locks);
ret = scoutfs_orphan_inode(inode);
WARN_ON_ONCE(ret); /* XXX returning error but items deleted */
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
return ret;
}
const struct file_operations scoutfs_dir_fops = {
.KC_FOP_READDIR = scoutfs_readdir,
#ifdef KC_FMODE_KABI_ITERATE
@@ -1821,10 +1766,7 @@ const struct file_operations scoutfs_dir_fops = {
.llseek = generic_file_llseek,
};
const struct inode_operations_wrapper scoutfs_dir_iops = {
.ops = {
const struct inode_operations scoutfs_dir_iops = {
.lookup = scoutfs_lookup,
.mknod = scoutfs_mknod,
.create = scoutfs_create,
@@ -1841,8 +1783,6 @@ const struct inode_operations_wrapper scoutfs_dir_iops = {
.removexattr = scoutfs_removexattr,
.symlink = scoutfs_symlink,
.permission = scoutfs_permission,
},
.tmpfile = scoutfs_tmpfile,
};
void scoutfs_dir_exit(void)

View File

@@ -5,7 +5,7 @@
#include "lock.h"
extern const struct file_operations scoutfs_dir_fops;
extern const struct inode_operations_wrapper scoutfs_dir_iops;
extern const struct inode_operations scoutfs_dir_iops;
extern const struct inode_operations scoutfs_symlink_iops;
struct scoutfs_link_backref_entry {
@@ -14,7 +14,7 @@ struct scoutfs_link_backref_entry {
u64 dir_pos;
u16 name_len;
struct scoutfs_dirent dent;
/* the full name is allocated and stored in dent.name[] */
/* the full name is allocated and stored in dent.name[0] */
};
int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,

View File

@@ -38,7 +38,7 @@ static bool ext_overlap(struct scoutfs_extent *ext, u64 start, u64 len)
return !(e_end < start || ext->start > end);
}
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
static bool ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
{
u64 in_end = start + len - 1;
u64 out_end = out->start + out->len - 1;
@@ -241,7 +241,7 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,
goto out;
/* removed extent must be entirely within found */
if (!scoutfs_ext_inside(start, len, &found)) {
if (!ext_inside(start, len, &found)) {
ret = -EINVAL;
goto out;
}
@@ -341,7 +341,7 @@ int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
if (ret == 0 && ext_overlap(&found, start, len)) {
/* set extent must be entirely within found */
if (!scoutfs_ext_inside(start, len, &found)) {
if (!ext_inside(start, len, &found)) {
ret = -EINVAL;
goto out;
}

View File

@@ -31,6 +31,5 @@ int scoutfs_ext_alloc(struct super_block *sb, struct scoutfs_ext_ops *ops,
struct scoutfs_extent *ext);
int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
void *arg, u64 start, u64 len, u64 map, u8 flags);
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out);
#endif

View File

@@ -27,14 +27,8 @@
#include "file.h"
#include "inode.h"
#include "per_task.h"
#include "omap.h"
/*
* Start a high level file read. We check for offline extents in the
* read region here so that we only check the extents once. We use the
* dio count to prevent releasing while we're reading after we've
* checked the extents.
*/
/* TODO: Direct I/O, AIO */
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
@@ -48,32 +42,30 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
int ret;
retry:
/* protect checked extents from release */
mutex_lock(&inode->i_mutex);
atomic_inc(&inode->i_dio_count);
mutex_unlock(&inode->i_mutex);
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
if (ret)
goto out;
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
/* protect checked extents from stage/release */
mutex_lock(&inode->i_mutex);
atomic_inc(&inode->i_dio_count);
mutex_unlock(&inode->i_mutex);
ret = scoutfs_data_wait_check_iov(inode, iov, nr_segs, pos,
SEF_OFFLINE,
SCOUTFS_IOC_DWO_READ,
&dw, inode_lock);
if (ret != 0)
goto out;
} else {
WARN_ON_ONCE(true);
}
ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
inode_dio_done(inode);
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
if (scoutfs_per_task_del(&si->pt_data_lock, &pt_ent))
inode_dio_done(inode);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
if (scoutfs_data_wait_found(&dw)) {

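The read path above pins i_dio_count around the offline-extent check so that release can't pull extents out from under a read that already validated them, and the reworked exit path only drops the pin when the per-task entry confirms this task took it. A condensed sketch of the guard, assuming the RHEL7-era inode API and a hypothetical extent-check helper:

```c
#include <linux/fs.h>
#include <linux/uio.h>

/* Pin i_dio_count across the extent check and the read so that
 * release paths blocked in inode_dio_wait() can't invalidate the
 * region mid-read; drop the pin when the read completes. */
static ssize_t guarded_read(struct inode *inode, struct kiocb *iocb,
			    const struct iovec *iov,
			    unsigned long nr_segs, loff_t pos)
{
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	atomic_inc(&inode->i_dio_count);	/* pin out releasers */
	mutex_unlock(&inode->i_mutex);

	ret = check_offline_extents(inode, iov, nr_segs, pos); /* hypothetical */
	if (ret == 0)
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

	inode_dio_done(inode);			/* unpin, wake waiters */
	return ret;
}
```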
View File

@@ -66,8 +66,8 @@ struct forest_info {
struct forest_info *name = SCOUTFS_SB(sb)->forest_info
struct forest_refs {
struct scoutfs_block_ref fs_ref;
struct scoutfs_block_ref logs_ref;
struct scoutfs_btree_ref fs_ref;
struct scoutfs_btree_ref logs_ref;
};
/* initialize some refs that initially aren't equal */
@@ -96,16 +96,20 @@ static void calc_bloom_nrs(struct forest_bloom_nrs *bloom,
}
}
static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scoutfs_block_ref *ref)
static struct scoutfs_block *read_bloom_ref(struct super_block *sb,
struct scoutfs_btree_ref *ref)
{
struct scoutfs_block *bl;
int ret;
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BLOOM, &bl);
if (ret < 0) {
if (ret == -ESTALE)
scoutfs_inc_counter(sb, forest_bloom_stale);
bl = ERR_PTR(ret);
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (IS_ERR(bl))
return bl;
if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
SCOUTFS_BLOCK_MAGIC_BLOOM)) {
scoutfs_block_invalidate(sb, bl);
scoutfs_block_put(sb, bl);
return ERR_PTR(-ESTALE);
}
return bl;
@@ -276,6 +280,7 @@ int scoutfs_forest_read_items(struct super_block *sb,
scoutfs_inc_counter(sb, forest_read_items);
calc_bloom_nrs(&bloom, &lock->start);
roots = lock->roots;
retry:
ret = scoutfs_client_get_roots(sb, &roots);
if (ret)
@@ -348,9 +353,15 @@ retry:
ret = 0;
out:
if (ret == -ESTALE) {
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
return -EIO;
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0) {
ret = -EIO;
goto out;
}
prev_refs = refs;
ret = scoutfs_client_get_roots(sb, &roots);
if (ret)
goto out;
goto retry;
}
@@ -370,14 +381,18 @@ out:
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
struct scoutfs_lock *lock)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
DECLARE_FOREST_INFO(sb, finf);
struct scoutfs_block *new_bl = NULL;
struct scoutfs_block *bl = NULL;
struct scoutfs_bloom_block *bb;
struct scoutfs_block_ref *ref;
struct scoutfs_btree_ref *ref;
struct forest_bloom_nrs bloom;
int nr_set = 0;
u64 blkno;
u64 nr;
int ret;
int err;
int i;
nr = le64_to_cpu(finf->our_log.nr);
@@ -395,11 +410,53 @@ int scoutfs_forest_set_bloom_bits(struct super_block *sb,
ref = &finf->our_log.bloom_ref;
ret = scoutfs_block_dirty_ref(sb, finf->alloc, finf->wri, ref, SCOUTFS_BLOCK_MAGIC_BLOOM,
&bl, 0, NULL);
if (ret < 0)
goto unlock;
bb = bl->data;
if (ref->blkno) {
bl = read_bloom_ref(sb, ref);
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
goto unlock;
}
bb = bl->data;
}
if (!ref->blkno || !scoutfs_block_writer_is_dirty(sb, bl)) {
ret = scoutfs_alloc_meta(sb, finf->alloc, finf->wri, &blkno);
if (ret < 0)
goto unlock;
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
blkno);
BUG_ON(err); /* could have dirtied */
ret = PTR_ERR(new_bl);
goto unlock;
}
if (bl) {
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
le64_to_cpu(ref->blkno));
BUG_ON(err); /* could have dirtied */
memcpy(new_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
} else {
memset(new_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
}
scoutfs_block_writer_mark_dirty(sb, finf->wri, new_bl);
scoutfs_block_put(sb, bl);
bl = new_bl;
bb = bl->data;
new_bl = NULL;
bb->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BLOOM);
bb->hdr.fsid = super->hdr.fsid;
bb->hdr.blkno = cpu_to_le64(blkno);
prandom_bytes(&bb->hdr.seq, sizeof(bb->hdr.seq));
ref->blkno = bb->hdr.blkno;
ref->seq = bb->hdr.seq;
}
for (i = 0; i < ARRAY_SIZE(bloom.nrs); i++) {
if (!test_and_set_bit_le(bloom.nrs[i], bb->bits)) {

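The retry rework in read_items above makes the -ESTALE policy explicit: refetch the roots and retry through fresh refs, but if two consecutive attempts read through identical refs the staleness isn't a racing writer and the caller gets -EIO. The policy, condensed (the context struct and both helpers are hypothetical stand-ins for the real read path):

```c
#include <linux/errno.h>
#include <linux/string.h>

static int read_items_retry(struct forest_ctx *ctx)
{
	struct forest_refs prev = {}, refs;
	int ret;

	for (;;) {
		ret = fetch_roots(ctx, &refs);	/* hypothetical */
		if (ret)
			return ret;
		ret = read_items(ctx, &refs);	/* hypothetical */
		if (ret != -ESTALE)
			return ret;
		/* identical refs after -ESTALE: persistent, give up */
		if (memcmp(&prev, &refs, sizeof(refs)) == 0)
			return -EIO;
		prev = refs;
	}
}
```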
View File

@@ -1,9 +1,6 @@
#ifndef _SCOUTFS_FORMAT_H_
#define _SCOUTFS_FORMAT_H_
#define SCOUTFS_INTEROP_VERSION 0ULL
#define SCOUTFS_INTEROP_VERSION_STR __stringify(0)
/* statfs(2) f_type */
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
@@ -14,7 +11,6 @@
#define SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK 0x897e4a7d
#define SCOUTFS_BLOCK_MAGIC_SRCH_PARENT 0xb23a2a05
#define SCOUTFS_BLOCK_MAGIC_ALLOC_LIST 0x8a93ac83
#define SCOUTFS_BLOCK_MAGIC_QUORUM 0xbc310868
/*
* The super block, quorum block, and file data allocation granularity
@@ -55,19 +51,15 @@
#define SCOUTFS_SUPER_BLKNO ((64ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
/*
* A small number of quorum blocks follow the super block, enough of
* them to match the starting offset of the super block so the region is
* aligned to the power of two that contains it.
* A reasonably large region of aligned quorum blocks follows the super
* block. Each voting cycle reads the entire region so we don't want it
* to be too enormous. 256K seems like a reasonably chunky single IO.
* The number of blocks in the region also determines the number of
* mounts that have a reasonable probability of not overwriting each
* other's random block locations.
*/
#define SCOUTFS_QUORUM_BLKNO (SCOUTFS_SUPER_BLKNO + 1)
#define SCOUTFS_QUORUM_BLOCKS (SCOUTFS_SUPER_BLKNO - 1)
/*
* Free metadata blocks start after the quorum blocks
*/
#define SCOUTFS_META_DEV_START_BLKNO \
((SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >> \
SCOUTFS_BLOCK_SM_LG_SHIFT)
#define SCOUTFS_QUORUM_BLKNO ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
#define SCOUTFS_QUORUM_BLOCKS ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
/*
* Start data on the data device aligned as well.
@@ -86,33 +78,11 @@ struct scoutfs_timespec {
__u8 __pad[4];
};
enum scoutfs_inet_family {
SCOUTFS_AF_NONE = 0,
SCOUTFS_AF_IPV4 = 1,
SCOUTFS_AF_IPV6 = 2,
};
struct scoutfs_inet_addr4 {
__le16 family;
__le16 port;
/* XXX ipv6 */
struct scoutfs_inet_addr {
__le32 addr;
};
/*
* Not yet supported by code.
*/
struct scoutfs_inet_addr6 {
__le16 family;
__le16 port;
__u8 addr[16];
__le32 flow_info;
__le32 scope_id;
__u8 __pad[4];
};
union scoutfs_inet_addr {
struct scoutfs_inet_addr4 v4;
struct scoutfs_inet_addr6 v6;
__u8 __pad[2];
};
/*
@@ -128,15 +98,6 @@ struct scoutfs_block_header {
__le64 blkno;
};
/*
* A reference to a block. The corresponding fields in the block_header
* must match after having read the block contents.
*/
struct scoutfs_block_ref {
__le64 blkno;
__le64 seq;
};
/*
* scoutfs identifies all file system metadata items by a small key
* struct.
@@ -195,6 +156,9 @@ struct scoutfs_key {
#define sklt_rid _sk_first
#define sklt_nr _sk_second
/* lock clients */
#define sklc_rid _sk_first
/* seqs */
#define skts_trans_seq _sk_first
#define skts_rid _sk_second
@@ -209,6 +173,19 @@ struct scoutfs_key {
#define skfl_neglen _sk_second
#define skfl_blkno _sk_third
struct scoutfs_radix_block {
struct scoutfs_block_header hdr;
union {
struct scoutfs_radix_ref {
__le64 blkno;
__le64 seq;
__le64 sm_total;
__le64 lg_total;
} refs[0];
__le64 bits[0];
};
};
struct scoutfs_avl_root {
__le16 node;
};
@@ -230,12 +207,17 @@ struct scoutfs_avl_node {
*/
#define SCOUTFS_BTREE_MAX_HEIGHT 20
struct scoutfs_btree_ref {
__le64 blkno;
__le64 seq;
};
/*
* A height of X means that the first block read will have level X-1 and
* the leaves will have level 0.
*/
struct scoutfs_btree_root {
struct scoutfs_block_ref ref;
struct scoutfs_btree_ref ref;
__u8 height;
__u8 __pad[7];
};
@@ -256,7 +238,7 @@ struct scoutfs_btree_block {
__le16 mid_free_len;
__u8 level;
__u8 __pad[7];
struct scoutfs_btree_item items[];
struct scoutfs_btree_item items[0];
/* leaf blocks have a fixed size item offset hash table at the end */
};
@@ -276,13 +258,18 @@ struct scoutfs_btree_block {
#define SCOUTFS_BTREE_LEAF_ITEM_HASH_BYTES \
(SCOUTFS_BTREE_LEAF_ITEM_HASH_NR * sizeof(__le16))
struct scoutfs_alloc_list_ref {
__le64 blkno;
__le64 seq;
};
/*
* first_nr tracks the nr of the first block in the list and is used for
* allocation sizing. total_nr is the sum of the nr of all the blocks in
* the list and is used for calculating total free block counts.
*/
struct scoutfs_alloc_list_head {
struct scoutfs_block_ref ref;
struct scoutfs_alloc_list_ref ref;
__le64 total_nr;
__le32 first_nr;
__u8 __pad[4];
@@ -301,10 +288,10 @@ struct scoutfs_alloc_list_head {
*/
struct scoutfs_alloc_list_block {
struct scoutfs_block_header hdr;
struct scoutfs_block_ref next;
struct scoutfs_alloc_list_ref next;
__le32 start;
__le32 nr;
__le64 blknos[]; /* naturally aligned for sorting */
__le64 blknos[0]; /* naturally aligned for sorting */
};
#define SCOUTFS_ALLOC_LIST_MAX_BLOCKS \
@@ -329,7 +316,7 @@ struct scoutfs_mounted_client_btree_val {
__u8 flags;
};
#define SCOUTFS_MOUNTED_CLIENT_QUORUM (1 << 0)
#define SCOUTFS_MOUNTED_CLIENT_VOTER (1 << 0)
/*
* srch files are a contiguous run of blocks with compressed entries
@@ -347,10 +334,15 @@ struct scoutfs_srch_entry {
#define SCOUTFS_SRCH_ENTRY_MAX_BYTES (2 + (sizeof(__u64) * 3))
struct scoutfs_srch_ref {
__le64 blkno;
__le64 seq;
};
struct scoutfs_srch_file {
struct scoutfs_srch_entry first;
struct scoutfs_srch_entry last;
struct scoutfs_block_ref ref;
struct scoutfs_srch_ref ref;
__le64 blocks;
__le64 entries;
__u8 height;
@@ -359,13 +351,13 @@ struct scoutfs_srch_file {
struct scoutfs_srch_parent {
struct scoutfs_block_header hdr;
struct scoutfs_block_ref refs[];
struct scoutfs_srch_ref refs[0];
};
#define SCOUTFS_SRCH_PARENT_REFS \
((SCOUTFS_BLOCK_LG_SIZE - \
offsetof(struct scoutfs_srch_parent, refs)) / \
sizeof(struct scoutfs_block_ref))
sizeof(struct scoutfs_srch_ref))
struct scoutfs_srch_block {
struct scoutfs_block_header hdr;
@@ -374,7 +366,7 @@ struct scoutfs_srch_block {
struct scoutfs_srch_entry tail;
__le32 entry_nr;
__le32 entry_bytes;
__u8 entries[];
__u8 entries[0];
};
/*
@@ -436,7 +428,7 @@ struct scoutfs_log_trees {
struct scoutfs_alloc_list_head meta_avail;
struct scoutfs_alloc_list_head meta_freed;
struct scoutfs_btree_root item_root;
struct scoutfs_block_ref bloom_ref;
struct scoutfs_btree_ref bloom_ref;
struct scoutfs_alloc_root data_avail;
struct scoutfs_alloc_root data_freed;
struct scoutfs_srch_file srch_file;
@@ -449,7 +441,7 @@ struct scoutfs_log_item_value {
__le64 vers;
__u8 flags;
__u8 __pad[7];
__u8 data[];
__u8 data[0];
};
/*
@@ -464,7 +456,7 @@ struct scoutfs_log_item_value {
struct scoutfs_bloom_block {
struct scoutfs_block_header hdr;
__le64 total_set;
__le64 bits[];
__le64 bits[0];
};
/*
@@ -490,10 +482,11 @@ struct scoutfs_bloom_block {
#define SCOUTFS_LOCK_ZONE 4
/* Items only stored in server btrees */
#define SCOUTFS_LOG_TREES_ZONE 6
#define SCOUTFS_TRANS_SEQ_ZONE 7
#define SCOUTFS_MOUNTED_CLIENT_ZONE 8
#define SCOUTFS_SRCH_ZONE 9
#define SCOUTFS_FREE_EXTENT_ZONE 10
#define SCOUTFS_LOCK_CLIENTS_ZONE 7
#define SCOUTFS_TRANS_SEQ_ZONE 8
#define SCOUTFS_MOUNTED_CLIENT_ZONE 9
#define SCOUTFS_SRCH_ZONE 10
#define SCOUTFS_FREE_EXTENT_ZONE 11
/* inode index zone */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
@@ -545,7 +538,7 @@ struct scoutfs_xattr {
__le16 val_len;
__u8 name_len;
__u8 __pad[5];
__u8 name[];
__u8 name[0];
};
@@ -554,84 +547,56 @@ struct scoutfs_xattr {
#define SCOUTFS_UUID_BYTES 16
#define SCOUTFS_QUORUM_MAX_SLOTS 15
/*
* To elect a leader, members race to have their variable election
* timeouts expire. If they're first to send a vote request with a
* greater term to a majority of waiting members they'll be elected with
* a majority. If the timeouts are too close, the vote may be split and
* everyone will wait for another cycle of variable timeouts to expire.
*
* These determine how long it will take to elect a leader once there's
* no evidence of a server (no leader quorum blocks on mount; heartbeat
* timeout expired).
* Mounts read all the quorum blocks and write to one random quorum
* block during a cycle. The min cycle time limits the per-mount iop
* load during elections. The random cycle delay makes it less likely
* that mounts will read and write at the same time and miss each
* other's writes. An election only completes if a quorum of mounts
* vote for a leader before any of their elections timeout. This is
* made less likely by the probability that mounts will overwrite each
* others random block locations. The max quorum count limits that
* probability. 9 mounts only have a 55% chance of writing to unique 4k
* blocks in a 256k region. The election timeout is set to include
* enough cycles to usually complete the election. Once a leader is
* elected it spends a number of cycles writing out blocks with itself
* logged as a leader. This reduces the possibility that servers
* will have their log entries overwritten and not be fenced.
*/
#define SCOUTFS_QUORUM_ELECT_MIN_MS 250
#define SCOUTFS_QUORUM_ELECT_VAR_MS 100
/*
* Once a leader is elected they send out heartbeats at regular
* intervals to force members to wait the much longer heartbeat timeout.
* Once the heartbeat timeout expires without receiving a heartbeat they'll
* switch over to performing elections.
*
* These determine how long it could take members to notice that a
* leader has gone silent and start to elect a new leader.
*/
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)
struct scoutfs_quorum_message {
__le64 fsid;
__le64 version;
__le64 term;
__u8 type;
__u8 from;
__u8 __pad[2];
__le32 crc;
};
/* a candidate requests a vote */
#define SCOUTFS_QUORUM_MSG_REQUEST_VOTE 0
/* followers send votes to candidates */
#define SCOUTFS_QUORUM_MSG_VOTE 1
/* elected leaders broadcast heartbeats to delay elections */
#define SCOUTFS_QUORUM_MSG_HEARTBEAT 2
/* leaders broadcast as they leave to break heartbeat timeout */
#define SCOUTFS_QUORUM_MSG_RESIGNATION 3
#define SCOUTFS_QUORUM_MSG_INVALID 4
/*
* The version is currently always 0, but will be used by mounts to
* discover that membership has changed.
*/
struct scoutfs_quorum_config {
__le64 version;
struct scoutfs_quorum_slot {
union scoutfs_inet_addr addr;
} slots[SCOUTFS_QUORUM_MAX_SLOTS];
};
#define SCOUTFS_QUORUM_MAX_COUNT 9
#define SCOUTFS_QUORUM_CYCLE_LO_MS 10
#define SCOUTFS_QUORUM_CYCLE_HI_MS 20
#define SCOUTFS_QUORUM_TERM_LO_MS 250
#define SCOUTFS_QUORUM_TERM_HI_MS 500
#define SCOUTFS_QUORUM_ELECTED_LOG_CYCLES 10
struct scoutfs_quorum_block {
struct scoutfs_block_header hdr;
__le64 fsid;
__le64 blkno;
__le64 term;
__le64 random_write_mark;
__le64 flags;
struct scoutfs_quorum_block_event {
__le64 write_nr;
__le64 voter_rid;
__le64 vote_for_rid;
__le32 crc;
__u8 log_nr;
__u8 __pad[3];
struct scoutfs_quorum_log {
__le64 term;
__le64 rid;
struct scoutfs_timespec ts;
} write, update_term, set_leader, clear_leader, fenced;
struct scoutfs_inet_addr addr;
} log[0];
};
#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)
#define SCOUTFS_QUORUM_LOG_MAX \
((SCOUTFS_BLOCK_SM_SIZE - sizeof(struct scoutfs_quorum_block)) / \
sizeof(struct scoutfs_quorum_log))
#define SCOUTFS_FLAG_IS_META_BDEV 0x01
struct scoutfs_super_block {
struct scoutfs_block_header hdr;
__le64 id;
__le64 version;
__le64 format_hash;
__le64 flags;
__u8 uuid[SCOUTFS_UUID_BYTES];
__le64 next_ino;
@@ -642,13 +607,19 @@ struct scoutfs_super_block {
__le64 total_data_blocks;
__le64 first_data_blkno;
__le64 last_data_blkno;
struct scoutfs_quorum_config qconf;
__le64 quorum_fenced_term;
__le64 quorum_server_term;
__le64 unmount_barrier;
__u8 quorum_count;
__u8 __pad[7];
struct scoutfs_inet_addr server_addr;
struct scoutfs_alloc_root meta_alloc[2];
struct scoutfs_alloc_root data_alloc;
struct scoutfs_alloc_list_head server_meta_avail[2];
struct scoutfs_alloc_list_head server_meta_freed[2];
struct scoutfs_btree_root fs_root;
struct scoutfs_btree_root logs_root;
struct scoutfs_btree_root lock_clients;
struct scoutfs_btree_root trans_seqs;
struct scoutfs_btree_root mounted_clients;
struct scoutfs_btree_root srch_root;
@@ -724,7 +695,7 @@ struct scoutfs_dirent {
__le64 pos;
__u8 type;
__u8 __pad[7];
__u8 name[];
__u8 name[0];
};
#define SCOUTFS_NAME_LEN 255
@@ -775,6 +746,12 @@ enum scoutfs_dentry_type {
* the same server after receiving a greeting response and to a new
* server after failover.
*
* @unmount_barrier: Incremented every time the remaining majority of
* quorum members all agree to leave. The server tells a quorum member
* the value that it's connecting under so that if the client sees the
* value increase in the super block then it knows that the server has
* processed its farewell and can safely unmount.
*
* @rid: The client's random id that was generated once as the mount
* started up. This identifies a specific remote mount across
* connections and servers. It's set to the client's rid in both the
@@ -782,14 +759,15 @@ enum scoutfs_dentry_type {
*/
struct scoutfs_net_greeting {
__le64 fsid;
__le64 version;
__le64 format_hash;
__le64 server_term;
__le64 unmount_barrier;
__le64 rid;
__le64 flags;
};
#define SCOUTFS_NET_GREETING_FLAG_FAREWELL (1 << 0)
#define SCOUTFS_NET_GREETING_FLAG_QUORUM (1 << 1)
#define SCOUTFS_NET_GREETING_FLAG_VOTER (1 << 1)
#define SCOUTFS_NET_GREETING_FLAG_INVALID (~(__u64)0 << 2)
/*
@@ -822,7 +800,7 @@ struct scoutfs_net_header {
__u8 flags;
__u8 error;
__u8 __pad[3];
__u8 data[];
__u8 data[0];
};
#define SCOUTFS_NET_FLAG_RESPONSE (1 << 0)
@@ -840,7 +818,6 @@ enum scoutfs_net_cmd {
SCOUTFS_NET_CMD_LOCK_RECOVER,
SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
SCOUTFS_NET_CMD_OPEN_INO_MAP,
SCOUTFS_NET_CMD_FAREWELL,
SCOUTFS_NET_CMD_UNKNOWN,
};
@@ -891,10 +868,15 @@ struct scoutfs_net_lock {
__u8 __pad[6];
};
struct scoutfs_net_lock_grant_response {
struct scoutfs_net_lock nl;
struct scoutfs_net_roots roots;
};
struct scoutfs_net_lock_recover {
__le16 nr;
__u8 __pad[6];
struct scoutfs_net_lock locks[];
struct scoutfs_net_lock locks[0];
};
#define SCOUTFS_NET_LOCK_MAX_RECOVER_NR \
@@ -961,42 +943,4 @@ enum scoutfs_corruption_sources {
#define SC_NR_LONGS DIV_ROUND_UP(SC_NR_SOURCES, BITS_PER_LONG)
#define SCOUTFS_OPEN_INO_MAP_SHIFT 10
#define SCOUTFS_OPEN_INO_MAP_BITS (1 << SCOUTFS_OPEN_INO_MAP_SHIFT)
#define SCOUTFS_OPEN_INO_MAP_MASK (SCOUTFS_OPEN_INO_MAP_BITS - 1)
#define SCOUTFS_OPEN_INO_MAP_LE64S (SCOUTFS_OPEN_INO_MAP_BITS / 64)
/*
* The request and response conversation is as follows:
*
* client[init] -> server:
* group_nr = G
* req_id = 0 (I)
* server -> client[*]
* group_nr = G
* req_id = R
* client[*] -> server
* group_nr = G (I)
* req_id = R
* bits
* server -> client[init]
* group_nr = G (I)
* req_id = R (I)
* bits
*
* Many of the fields in individual messages are ignored ("I") because
* the net id or the omap req_id can be used to identify the
* conversation. We always include them on the wire to make inspected
* messages easier to follow.
*/
struct scoutfs_open_ino_map_args {
__le64 group_nr;
__le64 req_id;
};
struct scoutfs_open_ino_map {
struct scoutfs_open_ino_map_args args;
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
};
#endif

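The 55% figure in the quorum comment above is a birthday-problem bound: with 4KB small blocks the 256KB region holds 64 candidates, and the chance that 9 mounts picking random blocks all pick distinct ones is 64·63·…·56 / 64⁹ ≈ 0.55. A quick check of the arithmetic (block and region sizes assumed from the defines above):

```c
#include <stdio.h>

int main(void)
{
	int blocks = 64;	/* 256KB region / 4KB small blocks */
	int mounts = 9;		/* SCOUTFS_QUORUM_MAX_COUNT */
	double p = 1.0;

	for (int i = 0; i < mounts; i++)
		p *= (double)(blocks - i) / blocks;

	printf("P(all writes hit unique blocks) = %.3f\n", p);
	return 0;	/* prints 0.555 */
}
```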
View File

@@ -33,7 +33,6 @@
#include "item.h"
#include "client.h"
#include "cmp.h"
#include "omap.h"
/*
* XXX
@@ -83,8 +82,6 @@ static void scoutfs_inode_ctor(void *obj)
init_waitqueue_head(&si->data_waitq.waitq);
init_rwsem(&si->xattr_rwsem);
RB_CLEAR_NODE(&si->writeback_node);
scoutfs_lock_init_coverage(&si->ino_lock_cov);
atomic_set(&si->inv_iput_count, 0);
inode_init_once(&si->inode);
}
@@ -144,15 +141,12 @@ static void remove_writeback_inode(struct inode_sb_info *inf,
void scoutfs_destroy_inode(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
DECLARE_INODE_SB_INFO(inode->i_sb, inf);
spin_lock(&inf->writeback_lock);
remove_writeback_inode(inf, SCOUTFS_I(inode));
spin_unlock(&inf->writeback_lock);
scoutfs_lock_del_coverage(inode->i_sb, &si->ino_lock_cov);
call_rcu(&inode->i_rcu, scoutfs_i_callback);
}
@@ -188,8 +182,7 @@ static void set_inode_ops(struct inode *inode)
inode->i_fop = &scoutfs_file_fops;
break;
case S_IFDIR:
inode->i_op = &scoutfs_dir_iops.ops;
inode->i_flags |= S_IOPS_WRAPPER;
inode->i_op = &scoutfs_dir_iops;
inode->i_fop = &scoutfs_dir_fops;
break;
case S_IFLNK:
@@ -313,8 +306,6 @@ int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
if (ret == 0) {
load_inode(inode, &sinode);
atomic64_set(&si->last_refreshed, refresh_gen);
scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
si->drop_invalidated = false;
}
} else {
ret = 0;
@@ -352,7 +343,8 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
if (!S_ISREG(inode->i_mode))
return 0;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true,
SIC_DIRTY_INODE());
if (ret)
return ret;
@@ -379,7 +371,8 @@ static int clear_truncate_flag(struct inode *inode, struct scoutfs_lock *lock)
LIST_HEAD(ind_locks);
int ret;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_DIRTY_INODE());
if (ret)
return ret;
@@ -494,7 +487,8 @@ retry:
}
}
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_DIRTY_INODE());
if (ret)
goto out;
@@ -672,28 +666,6 @@ struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino)
return ilookup5(sb, ino, scoutfs_iget_test, &ino);
}
static int iget_test_nofreeing(struct inode *inode, void *arg)
{
return !(inode->i_state & I_FREEING) && scoutfs_iget_test(inode, arg);
}
/*
* There's a natural risk of a deadlock between lock invalidation and
* eviction. Invalidation blocks locks while looking up inodes and
* invalidating local caches. Inode eviction gets a lock to check final
* inode deletion while the inode is marked FREEING which blocks
* lookups.
*
* We have a lookup variant which skips I_FREEING inodes instead of
* waiting on them. If an inode has made it to I_FREEING
* then it doesn't have any local caches that are reachable and the lock
* invalidation promise is kept.
*/
struct inode *scoutfs_ilookup_nofreeing(struct super_block *sb, u64 ino)
{
return ilookup5(sb, ino, iget_test_nofreeing, &ino);
}
struct inode *scoutfs_iget(struct super_block *sb, u64 ino)
{
struct scoutfs_lock *lock = NULL;
@@ -718,8 +690,6 @@ struct inode *scoutfs_iget(struct super_block *sb, u64 ino)
atomic64_set(&si->last_refreshed, 0);
ret = scoutfs_inode_refresh(inode, lock, 0);
if (ret == 0)
ret = scoutfs_omap_inc(sb, ino);
if (ret) {
iget_failed(inode);
inode = ERR_PTR(ret);
@@ -1219,7 +1189,8 @@ int scoutfs_inode_index_start(struct super_block *sb, u64 *seq)
* Returns > 0 if the seq changed and the locks should be retried.
*/
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
struct list_head *list, u64 seq)
struct list_head *list, u64 seq,
const struct scoutfs_item_count cnt)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct index_lock *ind_lock;
@@ -1235,7 +1206,7 @@ int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
goto out;
}
ret = scoutfs_hold_trans(sb);
ret = scoutfs_hold_trans(sb, cnt);
if (ret == 0 && seq != sbi->trans_seq) {
scoutfs_release_trans(sb);
ret = 1;
@@ -1249,7 +1220,8 @@ out:
}
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
bool set_data_seq)
bool set_data_seq,
const struct scoutfs_item_count cnt)
{
struct super_block *sb = inode->i_sb;
int ret;
@@ -1259,7 +1231,7 @@ int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
ret = scoutfs_inode_index_start(sb, &seq) ?:
scoutfs_inode_index_prepare(sb, list, inode,
set_data_seq) ?:
scoutfs_inode_index_try_lock_hold(sb, list, seq);
scoutfs_inode_index_try_lock_hold(sb, list, seq, cnt);
} while (ret > 0);
return ret;
@@ -1416,8 +1388,6 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
si->next_xattr_id = 0;
si->have_item = false;
atomic64_set(&si->last_refreshed, lock->refresh_gen);
scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
si->drop_invalidated = false;
si->flags = 0;
scoutfs_inode_set_meta_seq(inode);
@@ -1433,17 +1403,10 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
store_inode(&sinode, inode);
init_inode_key(&key, scoutfs_ino(inode));
ret = scoutfs_omap_inc(sb, ino);
if (ret < 0)
goto out;
ret = scoutfs_item_create(sb, &key, &sinode, sizeof(sinode), lock);
if (ret < 0)
scoutfs_omap_dec(sb, ino);
out:
if (ret) {
iput(inode);
inode = ERR_PTR(ret);
return ERR_PTR(ret);
}
return inode;
@@ -1459,18 +1422,7 @@ static void init_orphan_key(struct scoutfs_key *key, u64 rid, u64 ino)
};
}
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
struct scoutfs_key key;
init_orphan_key(&key, sbi->rid, ino);
return scoutfs_item_dirty(sb, &key, lock);
}
int scoutfs_orphan_delete(struct super_block *sb, u64 ino)
static int remove_orphan_item(struct super_block *sb, u64 ino)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
@@ -1488,15 +1440,15 @@ int scoutfs_orphan_delete(struct super_block *sb, u64 ino)
/*
* Remove all the items associated with a given inode. This is only
* called once nlink has dropped to zero and nothing has the inode open
* so we don't have to worry about dirents referencing the inode or link
* backrefs. Dropping nlink to 0 also created an orphan item. That
* orphan item will continue triggering attempts to finish previous
* partial deletion until all deletion is complete and the orphan item
* is removed.
* called once nlink has dropped to zero so we don't have to worry about
* dirents referencing the inode or link backrefs. Dropping nlink to 0
* also created an orphan item. That orphan item will continue
* triggering attempts to finish previous partial deletion until all
* deletion is complete and the orphan item is removed.
*/
static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lock *lock)
static int delete_inode_items(struct super_block *sb, u64 ino)
{
struct scoutfs_lock *lock = NULL;
struct scoutfs_inode sinode;
struct scoutfs_key key;
LIST_HEAD(ind_locks);
@@ -1506,6 +1458,10 @@ static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lo
u64 size;
int ret;
ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &lock);
if (ret)
return ret;
init_inode_key(&key, ino);
ret = scoutfs_item_lookup_exact(sb, &key, &sinode, sizeof(sinode),
@@ -1543,7 +1499,8 @@ static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lo
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
prepare_index_deletion(sb, &ind_locks, ino, mode, &sinode) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_DROP_INODE(mode, size));
if (ret > 0)
goto retry;
if (ret)
@@ -1565,29 +1522,23 @@ retry:
if (ret)
goto out;
ret = scoutfs_orphan_delete(sb, ino);
ret = remove_orphan_item(sb, ino);
out:
if (release)
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
return ret;
}
/*
* iput_final has already written out the inode's dirty pages before
* we get here. We're left with a clean inode that we have to
* tear down. We use locking and open inode number bitmaps to decide if
* we should finally destroy an inode that is no longer open nor
* reachable through directory entries.
* tear down. If there are no more links to the inode then we also
* remove all its persistent structures.
*/
void scoutfs_evict_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const u64 ino = scoutfs_ino(inode);
struct scoutfs_lock *lock;
int ret;
trace_scoutfs_evict_inode(inode->i_sb, scoutfs_ino(inode),
inode->i_nlink, is_bad_inode(inode));
@@ -1596,45 +1547,19 @@ void scoutfs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
ret = scoutfs_omap_should_delete(sb, inode, &lock);
if (ret > 0) {
ret = delete_inode_items(inode->i_sb, scoutfs_ino(inode), lock);
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
}
if (ret < 0)
scoutfs_err(sb, "error %d while checking to delete inode nr %llu, it might linger.",
ret, ino);
scoutfs_omap_dec(sb, ino);
if (inode->i_nlink == 0)
delete_inode_items(inode->i_sb, scoutfs_ino(inode));
clear:
clear_inode(inode);
}
/*
* We want to remove inodes from the cache as their count goes to 0 if
* they're no longer covered by a cluster lock or if they were
* unlinked while locked.
*
* We don't want unused cached inodes to linger outside of cluster
* locking so that they don't prevent final inode deletion on other
* nodes. We don't have specific per-inode or per-dentry locks which
* would otherwise remove the stale caches as they're invalidated.
* Stale cached inodes provide little value because they're going to be
* refreshed the next time they're locked. Populating the item cache
* and loading the inode item is a lot more expensive than initializing
* and inserting a newly allocated vfs inode.
*/
int scoutfs_drop_inode(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
struct super_block *sb = inode->i_sb;
int ret = generic_drop_inode(inode);
trace_scoutfs_drop_inode(sb, scoutfs_ino(inode), inode->i_nlink, inode_unhashed(inode),
si->drop_invalidated);
return si->drop_invalidated || !scoutfs_lock_is_covered(sb, &si->ino_lock_cov) ||
generic_drop_inode(inode);
trace_scoutfs_drop_inode(inode->i_sb, scoutfs_ino(inode),
inode->i_nlink, inode_unhashed(inode));
return ret;
}
/*
@@ -1651,10 +1576,8 @@ int scoutfs_scan_orphans(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
struct scoutfs_lock *inode_lock = NULL;
struct scoutfs_key key;
struct scoutfs_key last;
u64 ino;
int err = 0;
int ret;
@@ -1670,13 +1593,7 @@ int scoutfs_scan_orphans(struct super_block *sb)
if (ret < 0)
goto out;
ino = le64_to_cpu(key.sko_ino);
ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &inode_lock);
if (ret == 0) {
ret = delete_inode_items(sb, le64_to_cpu(key.sko_ino), inode_lock);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
}
ret = delete_inode_items(sb, le64_to_cpu(key.sko_ino));
if (ret && ret != -ENOENT && !err)
err = ret;
@@ -1710,28 +1627,19 @@ int scoutfs_orphan_inode(struct inode *inode)
}
/*
* Track an inode that could have dirty pages. Used to kick off
* writeback on all dirty pages during transaction commit without tying
* ourselves in knots trying to call through the high level vfs sync
* methods.
*
* This is called by writers who hold the inode and transaction. The
* inode's presence in the rbtree is removed by destroy_inode, prevented
* by the inode hold, and by committing the transaction, which is
* prevented by holding the transaction. The inode can only go from
* off the rbtree to on it while we're here.
* Track an inode that could have dirty pages. Used to kick off writeback
* on all dirty pages during transaction commit without tying ourselves in
* knots trying to call through the high level vfs sync methods.
*/
void scoutfs_inode_queue_writeback(struct inode *inode)
{
DECLARE_INODE_SB_INFO(inode->i_sb, inf);
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
if (RB_EMPTY_NODE(&si->writeback_node)) {
spin_lock(&inf->writeback_lock);
if (RB_EMPTY_NODE(&si->writeback_node))
insert_writeback_inode(inf, si);
spin_unlock(&inf->writeback_lock);
}
spin_lock(&inf->writeback_lock);
if (RB_EMPTY_NODE(&si->writeback_node))
insert_writeback_inode(inf, si);
spin_unlock(&inf->writeback_lock);
}
/*

View File

@@ -4,6 +4,7 @@
#include "key.h"
#include "lock.h"
#include "per_task.h"
#include "count.h"
#include "format.h"
#include "data.h"
@@ -51,13 +52,6 @@ struct scoutfs_inode_info {
struct rw_semaphore xattr_rwsem;
struct rb_node writeback_node;
struct scoutfs_lock_coverage ino_lock_cov;
/* drop if i_count hits 0, allows drop while invalidate holds coverage */
bool drop_invalidated;
struct llist_node inv_iput_llnode;
atomic_t inv_iput_count;
struct inode inode;
};
@@ -79,7 +73,6 @@ int scoutfs_orphan_inode(struct inode *inode);
struct inode *scoutfs_iget(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup_nofreeing(struct super_block *sb, u64 ino);
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
u32 minor, u64 ino);
@@ -90,9 +83,11 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
struct list_head *list, u64 ino,
umode_t mode);
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
struct list_head *list, u64 seq);
struct list_head *list, u64 seq,
const struct scoutfs_item_count cnt);
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
bool set_data_seq);
bool set_data_seq,
const struct scoutfs_item_count cnt);
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
@@ -122,8 +117,6 @@ int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
int scoutfs_scan_orphans(struct super_block *sb);
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino);
int scoutfs_orphan_delete(struct super_block *sb, u64 ino);
void scoutfs_inode_queue_writeback(struct inode *inode);
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);

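The scoutfs_item_count argument threaded through the declarations above changes the calling convention: the hold can now be refused for item-count reservation reasons, and a return > 0 still means the transaction seq moved and the whole prepare sequence must be retried. The loop in inode.c condenses to this shape:

```c
/* Condensed from scoutfs_inode_index_lock_hold(): retry the
 * start/prepare/hold sequence while the seq races ahead. */
static int index_lock_hold(struct inode *inode, struct list_head *list,
			   bool set_data_seq,
			   const struct scoutfs_item_count cnt)
{
	struct super_block *sb = inode->i_sb;
	u64 seq;
	int ret;

	do {
		ret = scoutfs_inode_index_start(sb, &seq) ?:
		      scoutfs_inode_index_prepare(sb, list, inode,
						  set_data_seq) ?:
		      scoutfs_inode_index_try_lock_hold(sb, list, seq, cnt);
	} while (ret > 0);	/* > 0: seq changed, start over */

	return ret;
}
```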
View File

@@ -674,7 +674,8 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)
/* setting only so we don't see 0 data seq with nonzero data_version */
set_data_seq = sm.data_version != 0 ? true : false;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq);
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq,
SIC_SETATTR_MORE());
if (ret)
goto unlock;
@@ -972,18 +973,12 @@ static long scoutfs_ioc_move_blocks(struct file *file, unsigned long arg)
goto out;
}
if (mb.flags & SCOUTFS_IOC_MB_UNKNOWN) {
ret = -EINVAL;
goto out;
}
ret = mnt_want_write_file(file);
if (ret < 0)
goto out;
ret = scoutfs_data_move_blocks(from, mb.from_off, mb.len,
to, mb.to_off, !!(mb.flags & SCOUTFS_IOC_MB_STAGE),
mb.data_version);
to, mb.to_off);
mnt_drop_write_file(file);
out:
fput(from_file);

View File

@@ -163,7 +163,7 @@ struct scoutfs_ioctl_ino_path_result {
__u64 dir_pos;
__u16 path_bytes;
__u8 _pad[6];
__u8 path[];
__u8 path[0];
};
/* Get a single path from the root to the given inode number */
@@ -259,7 +259,7 @@ struct scoutfs_ioctl_data_waiting {
__u8 _pad[6];
};
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U8_MAX << 0)
#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
struct scoutfs_ioctl_data_waiting)
@@ -279,7 +279,7 @@ struct scoutfs_ioctl_setattr_more {
};
#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U64_MAX << 1)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U8_MAX << 1)
#define SCOUTFS_IOC_SETATTR_MORE _IOW(SCOUTFS_IOCTL_MAGIC, 7, \
struct scoutfs_ioctl_setattr_more)
@@ -418,13 +418,12 @@ struct scoutfs_ioctl_alloc_detail_entry {
* on the same file system.
*
* from_fd specifies the source file and the ioctl is called on the
* destination file. Both files must have write access. from_off specifies
* the byte offset in the source, to_off is the byte offset in the
* destination, and len is the number of bytes in the region to move. All of
* the offsets and lengths must be in multiples of 4KB, except in the case
* where the from_off + len ends at the i_size of the source
* file. data_version is only used when STAGE flag is set (see below). flags
* field is currently only used to optionally specify STAGE behavior.
* destination file. Both files must have write access. from_off
* specifies the byte offset in the source, to_off is the byte offset in
* the destination, and len is the number of bytes in the region to
* move. All of the offsets and lengths must be in multiples of 4KB,
* except in the case where the from_off + len ends at the i_size of the
* source file.
*
* This interface only moves extents which are block granular, it does
* not perform RMW of sub-block byte extents and it does not overwrite
@@ -436,41 +435,30 @@ struct scoutfs_ioctl_alloc_detail_entry {
* i_size. The i_size update will maintain final partial blocks in the
* source.
*
* If STAGE flag is not set, it will return an error if either of the files
* have offline extents. It will return 0 when all of the extents in the
* source region have been moved to the destination. Moving extents updates
* the ctime, mtime, meta_seq, data_seq, and data_version fields of both the
* source and destination inodes. If an error is returned then partial
* It will return an error if either of the files have offline extents.
* It will return 0 when all of the extents in the source region have
* been moved to the destination. Moving extents updates the ctime,
* mtime, meta_seq, data_seq, and data_version fields of both the source
* and destination inodes. If an error is returned then partial
* progress may have been made and inode fields may have been updated.
*
* If the STAGE flag is set, behavior is as above except that the
* destination range must be in an offline extent. Fields are updated
* only for the source inode.
*
* Errors specific to this interface include:
*
* EINVAL: from_off, len, or to_off aren't a multiple of 4KB; the source
* and destination files are the same inode; either the source or
* destination is not a regular file; the destination file has
* an existing overlapping extent (if STAGE flag not set); the
* destination range is not in an offline extent (if STAGE set).
* an existing overlapping extent.
* EOVERFLOW: either from_off + len or to_off + len exceeded 64bits.
* EBADF: from_fd isn't a valid open file descriptor.
* EXDEV: the source and destination files are in different filesystems.
* EISDIR: either the source or destination is a directory.
* ENODATA: either the source or destination file have offline extents and
* STAGE flag is not set.
* ESTALE: data_version does not match destination data_version.
* ENODATA: either the source or destination file have offline extents.
*/
#define SCOUTFS_IOC_MB_STAGE (1 << 0)
#define SCOUTFS_IOC_MB_UNKNOWN (U64_MAX << 1)
struct scoutfs_ioctl_move_blocks {
__u64 from_fd;
__u64 from_off;
__u64 len;
__u64 to_off;
__u64 data_version;
__u64 flags;
};
#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \

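A minimal userspace sketch of driving the move_blocks ioctl as documented above, assuming the four-field form of the struct (the hunk also shows a variant carrying data_version and flags); the paths and header name are hypothetical:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "scoutfs_ioctl.h"	/* hypothetical export of the defines */

int main(void)
{
	int from = open("/mnt/scoutfs/src", O_RDWR);
	int to = open("/mnt/scoutfs/dst", O_RDWR);
	struct scoutfs_ioctl_move_blocks mb = {
		.from_fd = from,
		.from_off = 0,
		.len = 1024 * 1024,	/* offsets and len: 4KB multiples */
		.to_off = 0,
	};

	/* the ioctl is issued on the destination file */
	if (from < 0 || to < 0 ||
	    ioctl(to, SCOUTFS_IOC_MOVE_BLOCKS, &mb) < 0) {
		perror("move_blocks");
		return 1;
	}
	return 0;
}
```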
View File

@@ -1339,10 +1339,7 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
/* split needs multiple items, sparse may not have enough */
if (!left)
return -ENOMEM;
compact_page_items(sb, pg, left);
found = item_rbtree_walk(&pg->item_root, key, NULL, &par,
&pnode);
}
item = alloc_item(pg, key, liv, val, val_len);
@@ -1494,8 +1491,6 @@ retry:
rbtree_erase(&rd->node, &root);
rbtree_insert(&rd->node, par, pnode, &cinf->pg_root);
lru_accessed(sb, cinf, rd);
trace_scoutfs_item_read_page(sb, key, &rd->start,
&rd->end);
continue;
}
@@ -2347,8 +2342,6 @@ retry:
write_lock(&pg->rwlock);
pgi = trim_page_intersection(sb, cinf, pg, right, start, end);
trace_scoutfs_item_invalidate_page(sb, start, end,
&pg->start, &pg->end, pgi);
BUG_ON(pgi == PGI_DISJOINT); /* walk wouldn't ret disjoint */
if (pgi == PGI_INSIDE) {
@@ -2371,9 +2364,9 @@ retry:
/* inv was entirely inside page, done after bisect */
write_trylock_will_succeed(&right->rwlock);
rbtree_insert(&right->node, par, pnode, &cinf->pg_root);
lru_accessed(sb, cinf, right);
write_unlock(&right->rwlock);
write_unlock(&pg->rwlock);
lru_accessed(sb, cinf, right);
right = NULL;
break;
}
@@ -2403,6 +2396,7 @@ static int item_lru_shrink(struct shrinker *shrink,
struct active_reader *active;
struct cached_page *tmp;
struct cached_page *pg;
LIST_HEAD(list);
int nr;
if (sc->nr_to_scan == 0)
@@ -2439,17 +2433,21 @@ static int item_lru_shrink(struct shrinker *shrink,
__lru_remove(sb, cinf, pg);
rbtree_erase(&pg->node, &cinf->pg_root);
list_move_tail(&pg->lru_head, &list);
invalidate_pcpu_page(pg);
write_unlock(&pg->rwlock);
put_pg(sb, pg);
if (--nr == 0)
break;
}
write_unlock(&cinf->rwlock);
spin_unlock(&cinf->lru_lock);
list_for_each_entry_safe(pg, tmp, &list, lru_head) {
list_del_init(&pg->lru_head);
put_pg(sb, pg);
}
out:
return min_t(unsigned long, cinf->lru_pages, INT_MAX);
}

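The shrinker above uses the usual two-phase teardown: victims are unlinked onto a private list while the cache locks are held, and the final put, which may do real work, runs only after the locks are dropped. A generic sketch of the pattern with hypothetical cache types:

```c
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct page_ent {			/* hypothetical cache entry */
	struct rb_node node;
	struct list_head lru_head;
};

struct cache {				/* hypothetical cache */
	spinlock_t lru_lock;
	struct list_head lru;
	struct rb_root root;
};

void put_ent(struct cache *c, struct page_ent *pg); /* final put */

/* nr must be > 0; unlink under the lock, put with it dropped */
static void shrink_cache(struct cache *c, int nr)
{
	struct page_ent *pg, *tmp;
	LIST_HEAD(list);

	spin_lock(&c->lru_lock);
	list_for_each_entry_safe(pg, tmp, &c->lru, lru_head) {
		rb_erase(&pg->node, &c->root);
		list_move_tail(&pg->lru_head, &list);
		if (--nr == 0)
			break;
	}
	spin_unlock(&c->lru_lock);

	list_for_each_entry_safe(pg, tmp, &list, lru_head) {
		list_del_init(&pg->lru_head);
		put_ent(c, pg);
	}
}
```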
View File

@@ -34,7 +34,6 @@
#include "data.h"
#include "xattr.h"
#include "item.h"
#include "omap.h"
/*
* scoutfs uses a lock service to manage item cache consistency between
@@ -66,7 +65,7 @@
* relative to that lock state we resend.
*/
#define GRACE_PERIOD_KT ms_to_ktime(10)
#define GRACE_PERIOD_KT ms_to_ktime(2)
/*
* allocated per-super, freed on unmount.
@@ -75,7 +74,6 @@ struct lock_info {
struct super_block *sb;
spinlock_t lock;
bool shutdown;
bool unmounting;
struct rb_root lock_tree;
struct rb_root lock_range_tree;
struct shrinker shrinker;
@@ -89,9 +87,6 @@ struct lock_info {
struct work_struct shrink_work;
struct list_head shrink_list;
atomic64_t next_refresh_gen;
struct work_struct inv_iput_work;
struct llist_head inv_iput_llist;
struct dentry *tseq_dentry;
struct scoutfs_tseq_tree tseq_tree;
};
@@ -127,81 +122,21 @@ static bool lock_modes_match(int granted, int requested)
}
/*
* Final iput can get into evict and perform final inode deletion which
* can delete a lot of items under locks and transactions. We really
* don't want to be doing all that in an iput during invalidation. When
* invalidation sees that iput might perform final deletion it puts them
* on a list and queues this work.
*
* Nothing stops multiple puts for multiple invalidations of an inode
* before the work runs so we can track multiple puts in flight.
*/
static void lock_inv_iput_worker(struct work_struct *work)
{
struct lock_info *linfo = container_of(work, struct lock_info, inv_iput_work);
struct scoutfs_inode_info *si;
struct scoutfs_inode_info *tmp;
struct llist_node *inodes;
bool more;
inodes = llist_del_all(&linfo->inv_iput_llist);
llist_for_each_entry_safe(si, tmp, inodes, inv_iput_llnode) {
do {
more = atomic_dec_return(&si->inv_iput_count) > 0;
iput(&si->inode);
} while (more);
}
}
/*
* Invalidate cached data associated with an inode whose lock is going
* away. We ignore inodes with I_FREEING instead of waiting on them to
* avoid a deadlock, if they're freeing then they won't be visible to
* future lock users and we don't need to invalidate them.
*
* We try to drop cached dentries and inodes covered by the lock if they
* aren't referenced. This removes them from the mount's open map and
* allows deletions to be performed by unlink without having to wait for
* remote cached inodes to be dropped.
*
* If the cached inode was already deferring final inode deletion then
* we can't perform that inline in invalidation. The locking alone would
* deadlock, and it might also take multiple transactions to fully
* delete an inode with significant metadata. We only perform the iput
* inline if we know that possible eviction can't perform the final
* deletion, otherwise we kick it off to async work.
* invalidate cached data associated with an inode whose lock is going
* away.
*/
static void invalidate_inode(struct super_block *sb, u64 ino)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_inode_info *si;
struct inode *inode;
inode = scoutfs_ilookup_nofreeing(sb, ino);
inode = scoutfs_ilookup(sb, ino);
if (inode) {
si = SCOUTFS_I(inode);
scoutfs_inc_counter(sb, lock_invalidate_inode);
if (S_ISREG(inode->i_mode)) {
truncate_inode_pages(inode->i_mapping, 0);
scoutfs_data_wait_changed(inode);
}
/* can't touch during unmount, dcache destroys w/o locks */
if (!linfo->unmounting)
d_prune_aliases(inode);
si->drop_invalidated = true;
if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
iput(inode);
} else {
/* defer iput to work context so we don't evict inodes from invalidation */
if (atomic_inc_return(&si->inv_iput_count) == 1)
llist_add(&si->inv_iput_llnode, &linfo->inv_iput_llist);
smp_wmb(); /* count and list visible before work executes */
queue_work(linfo->workq, &linfo->inv_iput_work);
}
iput(inode);
}
}
@@ -237,16 +172,6 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
/* have to invalidate if we're not in the only usable case */
if (!(prev == SCOUTFS_LOCK_WRITE && mode == SCOUTFS_LOCK_READ)) {
retry:
/* invalidate inodes before removing coverage */
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
ino = le64_to_cpu(lock->start.ski_ino);
last = le64_to_cpu(lock->end.ski_ino);
while (ino <= last) {
invalidate_inode(sb, ino);
ino++;
}
}
/* remove cov items to tell users that their cache is stale */
spin_lock(&lock->cov_list_lock);
list_for_each_entry_safe(cov, tmp, &lock->cov_list, head) {
@@ -262,6 +187,15 @@ retry:
}
spin_unlock(&lock->cov_list_lock);
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
ino = le64_to_cpu(lock->start.ski_ino);
last = le64_to_cpu(lock->end.ski_ino);
while (ino <= last) {
invalidate_inode(sb, ino);
ino++;
}
}
scoutfs_item_invalidate(sb, &lock->start, &lock->end);
}
@@ -295,7 +229,6 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
BUG_ON(!list_empty(&lock->shrink_head));
BUG_ON(!list_empty(&lock->cov_list));
scoutfs_omap_free_lock_data(lock->omap_data);
kfree(lock);
}
@@ -331,7 +264,6 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
lock->mode = SCOUTFS_LOCK_NULL;
atomic64_set(&lock->forest_bloom_nr, 0);
spin_lock_init(&lock->omap_spinlock);
trace_scoutfs_lock_alloc(sb, lock);
@@ -621,7 +553,7 @@ static void queue_grant_work(struct lock_info *linfo)
{
assert_spin_locked(&linfo->lock);
if (!list_empty(&linfo->grant_list))
if (!list_empty(&linfo->grant_list) && !linfo->shutdown)
queue_work(linfo->workq, &linfo->grant_work);
}
@@ -637,7 +569,7 @@ static void queue_inv_work(struct lock_info *linfo)
{
assert_spin_locked(&linfo->lock);
if (!list_empty(&linfo->inv_list))
if (!list_empty(&linfo->inv_list) && !linfo->shutdown)
mod_delayed_work(linfo->workq, &linfo->inv_dwork, 0);
}
@@ -706,6 +638,7 @@ static void lock_grant_worker(struct work_struct *work)
struct lock_info *linfo = container_of(work, struct lock_info,
grant_work);
struct super_block *sb = linfo->sb;
struct scoutfs_net_lock_grant_response *gr;
struct scoutfs_net_lock *nl;
struct scoutfs_lock *lock;
struct scoutfs_lock *tmp;
@@ -715,7 +648,8 @@ static void lock_grant_worker(struct work_struct *work)
spin_lock(&linfo->lock);
list_for_each_entry_safe(lock, tmp, &linfo->grant_list, grant_head) {
nl = &lock->grant_nl;
gr = &lock->grant_resp;
nl = &lock->grant_resp.nl;
/* wait for reordered invalidation to finish */
if (lock->mode != nl->old_mode)
@@ -733,6 +667,7 @@ static void lock_grant_worker(struct work_struct *work)
lock->request_pending = 0;
lock->mode = nl->new_mode;
lock->write_version = le64_to_cpu(nl->write_version);
lock->roots = gr->roots;
if (lock_count_match_exists(nl->new_mode, lock->waiters))
extend_grace(sb, lock);
@@ -754,8 +689,9 @@ static void lock_grant_worker(struct work_struct *work)
* work to process.
*/
int scoutfs_lock_grant_response(struct super_block *sb,
struct scoutfs_net_lock *nl)
struct scoutfs_net_lock_grant_response *gr)
{
struct scoutfs_net_lock *nl = &gr->nl;
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
@@ -769,7 +705,7 @@ int scoutfs_lock_grant_response(struct super_block *sb,
trace_scoutfs_lock_grant_response(sb, lock);
BUG_ON(!lock->request_pending);
lock->grant_nl = *nl;
lock->grant_resp = *gr;
list_add_tail(&lock->grant_head, &linfo->grant_list);
queue_grant_work(linfo);
@@ -781,9 +717,7 @@ int scoutfs_lock_grant_response(struct super_block *sb,
/*
* Each lock has received a lock invalidation request from the server
* which specifies a new mode for the lock. The server will only send
* one invalidation request at a time for each lock. The server can
* send another invalidate request after we send the response but before
* we reacquire the lock and finish invalidation.
* one invalidation request at a time for each lock.
*
* This is an unsolicited request from the server so it can arrive at
* any time after we make the server aware of the lock by initially
@@ -836,6 +770,16 @@ static void lock_invalidate_worker(struct work_struct *work)
list_for_each_entry_safe(lock, tmp, &linfo->inv_list, inv_head) {
nl = &lock->inv_nl;
/* skip if grace hasn't elapsed, record earliest */
deadline = lock->grace_deadline;
if (ktime_before(now, deadline)) {
delay = min(delay,
nsecs_to_jiffies(ktime_to_ns(
ktime_sub(deadline, now))));
scoutfs_inc_counter(linfo->sb, lock_grace_wait);
continue;
}
/* wait for reordered grant to finish */
if (lock->mode != nl->old_mode)
continue;
@@ -844,15 +788,6 @@ static void lock_invalidate_worker(struct work_struct *work)
if (!lock_counts_match(nl->new_mode, lock->users))
continue;
/* skip if grace hasn't elapsed, record earliest */
deadline = lock->grace_deadline;
if (!linfo->shutdown && ktime_before(now, deadline)) {
delay = min(delay,
nsecs_to_jiffies(ktime_to_ns(
ktime_sub(deadline, now))));
scoutfs_inc_counter(linfo->sb, lock_grace_wait);
continue;
}
/* set the new mode, no incompatible users during inval */
lock->mode = nl->new_mode;
@@ -870,14 +805,8 @@ static void lock_invalidate_worker(struct work_struct *work)
nl = &lock->inv_nl;
net_id = lock->inv_net_id;
/* only lock protocol, inv can't call subsystems after shutdown */
if (!linfo->shutdown) {
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
BUG_ON(ret);
}
/* allow another request after we respond but before we finish */
lock->inv_net_id = 0;
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
BUG_ON(ret);
/* respond with the key and modes from the request */
ret = scoutfs_client_lock_response(sb, net_id, nl);
@@ -890,13 +819,11 @@ static void lock_invalidate_worker(struct work_struct *work)
spin_lock(&linfo->lock);
list_for_each_entry_safe(lock, tmp, &ready, inv_head) {
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
trace_scoutfs_lock_invalidated(sb, lock);
if (lock->inv_net_id == 0) {
/* finish if another request didn't arrive */
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
wake_up(&lock->waitq);
}
wake_up(&lock->waitq);
put_lock(linfo, lock);
}
@@ -911,47 +838,34 @@ out:
}
/*
* Record an incoming invalidate request from the server and add its
* lock to the list for processing. This request can come from a new
* server while racing with invalidation that frees locks from an old server.
* It's fine to not find the requested lock and send an immediate
* response.
* Record an incoming invalidate request from the server and add its lock
* to the list for processing.
*
* The invalidation process drops the linfo lock to send responses. The
* moment it does so we can receive another invalidation request (the
* server can ask us to go from write->read then read->null). We allow
* for one chain like this but it's a bug if we receive more concurrent
* invalidation requests than that. The server should only be sending
* one at a time.
* This is trusting the server and will crash if it's sent bad requests :/
*/
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
int ret = 0;
scoutfs_inc_counter(sb, lock_invalidate_request);
spin_lock(&linfo->lock);
lock = get_lock(sb, &nl->key);
BUG_ON(!lock);
if (lock) {
BUG_ON(lock->inv_net_id != 0);
lock->inv_net_id = net_id;
BUG_ON(lock->invalidate_pending);
lock->invalidate_pending = 1;
lock->inv_nl = *nl;
if (list_empty(&lock->inv_head)) {
list_add_tail(&lock->inv_head, &linfo->inv_list);
lock->invalidate_pending = 1;
}
lock->inv_net_id = net_id;
list_add_tail(&lock->inv_head, &linfo->inv_list);
trace_scoutfs_lock_invalidate_request(sb, lock);
queue_inv_work(linfo);
}
spin_unlock(&linfo->lock);
if (!lock)
ret = scoutfs_client_lock_response(sb, net_id, nl);
return ret;
return 0;
}
/*
@@ -1082,7 +996,7 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
lock_inc_count(lock->waiters, mode);
for (;;) {
if (WARN_ON_ONCE(linfo->shutdown)) {
if (linfo->shutdown) {
ret = -ESHUTDOWN;
break;
}
@@ -1564,7 +1478,7 @@ restart:
BUG_ON(lock->mode == SCOUTFS_LOCK_NULL);
BUG_ON(!list_empty(&lock->shrink_head));
if (nr-- == 0)
if (linfo->shutdown || nr-- == 0)
break;
__lock_del_lru(linfo, lock);
@@ -1591,7 +1505,7 @@ out:
return ret;
}
void scoutfs_free_unused_locks(struct super_block *sb)
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr)
{
struct lock_info *linfo = SCOUTFS_SB(sb)->lock_info;
struct shrink_control sc = {
@@ -1619,40 +1533,15 @@ static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
}
/*
* shrink_dcache_for_umount() tears down dentries with no locking. We
* need to make sure that our invalidation won't touch dentries before
* we return and the caller calls the generic vfs unmount path.
*/
void scoutfs_lock_unmount_begin(struct super_block *sb)
{
DECLARE_LOCK_INFO(sb, linfo);
if (linfo) {
linfo->unmounting = true;
flush_delayed_work(&linfo->inv_dwork);
}
}
/*
* The caller is going to be shutting down transactions and the client.
* We need to make sure that locking won't call either after we return.
* The caller is going to be calling _destroy soon and, critically, is
* about to shutdown networking before calling us so that we don't get
* any callbacks while we're destroying. We have to ensure that we
* won't call networking after this returns.
*
* At this point all fs callers and internal services that use locks
* should have stopped. We won't have any callers initiating lock
* transitions and sending requests. We set the shutdown flag to catch
* anyone who breaks this rule.
*
* We unregister the shrinker so that we won't try and send null
* requests in response to memory pressure. The locks will all be
* unceremoniously dropped once we get a farewell response from the
* server which indicates that they destroyed our locking state.
*
* We will still respond to invalidation requests that have to be
* processed to let unmount in other mounts acquire locks and make
* progress. However, we don't fully process the invalidation because
* we're shutting down. We only update the lock state and send the
* response. We shouldn't have any users of locking that require
* invalidation correctness at this point.
* Internal fs threads can be using locking, and locking can have async
* work pending. We use ->shutdown to force callers to return
* -ESHUTDOWN and to prevent the future queueing of work that could call
* networking. Locks whose work is stopped will be torn down by _destroy.
*/
void scoutfs_lock_shutdown(struct super_block *sb)
{
@@ -1665,18 +1554,19 @@ void scoutfs_lock_shutdown(struct super_block *sb)
trace_scoutfs_lock_shutdown(sb, linfo);
/* stop the shrinker from queueing work */
unregister_shrinker(&linfo->shrinker);
flush_work(&linfo->shrink_work);
/* cause current and future lock calls to return errors */
spin_lock(&linfo->lock);
linfo->shutdown = true;
for (node = rb_first(&linfo->lock_tree); node; node = rb_next(node)) {
lock = rb_entry(node, struct scoutfs_lock, node);
wake_up(&lock->waitq);
}
spin_unlock(&linfo->lock);
flush_work(&linfo->grant_work);
flush_delayed_work(&linfo->inv_dwork);
flush_work(&linfo->shrink_work);
}
/*
@@ -1704,6 +1594,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
trace_scoutfs_lock_destroy(sb, linfo);
/* stop the shrinker from queueing work */
unregister_shrinker(&linfo->shrinker);
/* make sure that no one's actively using locks */
spin_lock(&linfo->lock);
@@ -1749,10 +1641,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
__lock_del_lru(linfo, lock);
if (!list_empty(&lock->grant_head))
list_del_init(&lock->grant_head);
if (!list_empty(&lock->inv_head)) {
if (!list_empty(&lock->inv_head))
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
}
if (!list_empty(&lock->shrink_head))
list_del_init(&lock->shrink_head);
lock_remove(linfo, lock);
@@ -1789,8 +1679,6 @@ int scoutfs_lock_setup(struct super_block *sb)
INIT_WORK(&linfo->shrink_work, lock_shrink_worker);
INIT_LIST_HEAD(&linfo->shrink_list);
atomic64_set(&linfo->next_refresh_gen, 0);
INIT_WORK(&linfo->inv_iput_work, lock_inv_iput_worker);
init_llist_head(&linfo->inv_iput_llist);
scoutfs_tseq_tree_init(&linfo->tseq_tree, lock_tseq_show);
sbi->lock_info = linfo;

View File

@@ -10,8 +10,6 @@
#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID
struct scoutfs_omap_lock;
/*
* A few fields (start, end, refresh_gen, write_version, granted_mode)
* are referenced by code outside lock.c.
@@ -25,6 +23,7 @@ struct scoutfs_lock {
u64 refresh_gen;
u64 write_version;
u64 dirty_trans_seq;
struct scoutfs_net_roots roots;
struct list_head lru_head;
wait_queue_head_t waitq;
ktime_t grace_deadline;
@@ -32,7 +31,7 @@ struct scoutfs_lock {
invalidate_pending:1;
struct list_head grant_head;
struct scoutfs_net_lock grant_nl;
struct scoutfs_net_lock_grant_response grant_resp;
struct list_head inv_head;
struct scoutfs_net_lock inv_nl;
u64 inv_net_id;
@@ -49,10 +48,6 @@ struct scoutfs_lock {
/* the forest tracks which log tree last saw bloom bit updates */
atomic64_t forest_bloom_nr;
/* open ino mapping has a valid map for a held write lock */
spinlock_t omap_spinlock;
struct scoutfs_omap_lock_data *omap_data;
};
struct scoutfs_lock_coverage {
@@ -62,7 +57,7 @@ struct scoutfs_lock_coverage {
};
int scoutfs_lock_grant_response(struct super_block *sb,
struct scoutfs_net_lock *nl);
struct scoutfs_net_lock_grant_response *gr);
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl);
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
@@ -101,10 +96,9 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
enum scoutfs_lock_mode mode);
void scoutfs_free_unused_locks(struct super_block *sb);
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr);
int scoutfs_lock_setup(struct super_block *sb);
void scoutfs_lock_unmount_begin(struct super_block *sb);
void scoutfs_lock_shutdown(struct super_block *sb);
void scoutfs_lock_destroy(struct super_block *sb);

View File

@@ -20,10 +20,10 @@
#include "tseq.h"
#include "spbm.h"
#include "block.h"
#include "btree.h"
#include "msg.h"
#include "scoutfs_trace.h"
#include "lock_server.h"
#include "recov.h"
/*
* The scoutfs server implements a simple lock service. Client mounts
@@ -56,11 +56,14 @@
* Message requests and responses are reliably delivered in order across
* reconnection.
*
* As a new server comes up it recovers lock state from existing clients
* which were connected to a previous lock server. Recover requests are
* sent to clients as they connect and they respond with all their
* locks. Once all clients and locks are accounted for, normal
* processing can resume.
* The server maintains a persistent record of connected clients. A new
* server instance discovers these and waits for previously connected
* clients to reconnect and recover their state before proceeding. If
* clients don't reconnect they are forcefully prevented from unsafely
* accessing the shared persistent storage (fenced, according to the
* rules of the platform; this could range from being powered off to
* having their switch port disabled to having their local block device
* set read-only).
*
* The lock server doesn't respond to memory pressure. The only way
* locks are freed is if they are invalidated to null on behalf of a
@@ -74,8 +77,12 @@ struct lock_server_info {
struct super_block *sb;
spinlock_t lock;
struct mutex mutex;
struct rb_root locks_root;
struct scoutfs_spbm recovery_pending;
struct delayed_work recovery_dwork;
struct scoutfs_tseq_tree tseq_tree;
struct dentry *tseq_dentry;
@@ -423,7 +430,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
goto out;
}
/* XXX should always have a server lock here? */
/* XXX should always have a server lock here? recovery? */
snode = get_server_lock(inf, &nl->key, NULL, false);
if (!snode) {
ret = -EINVAL;
@@ -466,14 +473,18 @@ out:
* so we unlock the snode mutex.
*
* All progress must wait for all clients to finish with recovery
* because we don't know which locks they'll hold. Once recovery
* finishes the server calls us to kick all the locks that were waiting
* during recovery.
* because we don't know which locks they'll hold. The unlocked
* recovery_pending test here is OK. It's filled by setup before
* anything runs. It's emptied by recovery completion. We can get a
* false nonempty result if we race with recovery completion, but that's
* OK because recovery completion processes all the locks that have
* requests after emptying, including the unlikely loser of that race.
*/
static int process_waiting_requests(struct super_block *sb,
struct server_lock_node *snode)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_net_lock_grant_response gres;
struct scoutfs_net_lock nl;
struct client_lock_entry *req;
struct client_lock_entry *req_tmp;
@@ -486,7 +497,7 @@ static int process_waiting_requests(struct super_block *sb,
/* processing waits for all invalidation responses or recovery */
if (!list_empty(&snode->invalidated) ||
scoutfs_recov_next_pending(sb, SCOUTFS_RECOV_LOCKS) != 0) {
!scoutfs_spbm_empty(&inf->recovery_pending)) {
ret = 0;
goto out;
}
@@ -536,8 +547,11 @@ static int process_waiting_requests(struct super_block *sb,
nl.write_version = cpu_to_le64(wv);
}
gres.nl = nl;
scoutfs_server_get_roots(sb, &gres.roots);
ret = scoutfs_server_lock_response(sb, req->rid,
req->net_id, &nl);
req->net_id, &gres);
if (ret)
goto out;
@@ -559,39 +573,85 @@ out:
return ret;
}
static void init_lock_clients_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
.sk_zone = SCOUTFS_LOCK_CLIENTS_ZONE,
.sklc_rid = cpu_to_le64(rid),
};
}
/*
* The server received a greeting from a client for the first time. If
* the client is in lock recovery then we send the initial lock request.
* the client had already talked to the server then we must find an
* existing record for it and should begin recovery. If it doesn't have
* a record then it's timed out and we can't allow it to reconnect. If
* it's connecting for the first time then we insert a new record.
*
* This is running in concurrent client greeting processing contexts.
*/
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid)
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
bool should_exist)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
int ret;
if (scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
init_lock_clients_key(&key, rid);
mutex_lock(&inf->mutex);
if (should_exist) {
ret = scoutfs_btree_lookup(sb, &super->lock_clients, &key,
&iref);
if (ret == 0)
scoutfs_btree_put_iref(&iref);
} else {
ret = scoutfs_btree_insert(sb, inf->alloc, inf->wri,
&super->lock_clients,
&key, NULL, 0);
}
mutex_unlock(&inf->mutex);
if (should_exist && ret == 0) {
scoutfs_key_set_zeros(&key);
ret = scoutfs_server_lock_recover_request(sb, rid, &key);
} else {
ret = 0;
if (ret)
goto out;
}
out:
return ret;
}
/*
* All clients have finished lock recovery, so we can make forward
* progress on all the queued requests that were waiting on recovery.
* A client sent their last recovery response and can exit recovery. If
* they were the last client in recovery then we can process all the
* server locks that had requests.
*/
int scoutfs_lock_server_finished_recovery(struct super_block *sb)
static int finished_recovery(struct super_block *sb, u64 rid, bool cancel)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct server_lock_node *snode;
struct scoutfs_key key;
bool still_pending;
int ret = 0;
spin_lock(&inf->lock);
scoutfs_spbm_clear(&inf->recovery_pending, rid);
still_pending = !scoutfs_spbm_empty(&inf->recovery_pending);
spin_unlock(&inf->lock);
if (still_pending)
return 0;
if (cancel)
cancel_delayed_work_sync(&inf->recovery_dwork);
scoutfs_key_set_zeros(&key);
scoutfs_info(sb, "all lock clients recovered");
while ((snode = get_server_lock(inf, &key, NULL, true))) {
key = snode->key;
@@ -635,15 +695,16 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
int i;
/* client must be in recovery */
if (!scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
spin_lock(&inf->lock);
if (!scoutfs_spbm_test(&inf->recovery_pending, rid))
ret = -EINVAL;
spin_unlock(&inf->lock);
if (ret)
goto out;
}
/* client has sent us all their locks */
if (nlr->nr == 0) {
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_LOCKS);
ret = 0;
ret = finished_recovery(sb, rid, true);
goto out;
}
@@ -694,15 +755,101 @@ out:
return ret;
}
static int get_rid_and_put_ref(struct scoutfs_btree_item_ref *iref, u64 *rid)
{
int ret;
if (iref->val_len == 0) {
*rid = le64_to_cpu(iref->key->sklc_rid);
ret = 0;
} else {
ret = -EIO;
}
scoutfs_btree_put_iref(iref);
return ret;
}
/*
* This work executes if enough time passes without all of the clients
* finishing with recovery and canceling the work. We walk through the
* client records and find any that still have their recovery pending.
*/
static void scoutfs_lock_server_recovery_timeout(struct work_struct *work)
{
struct lock_server_info *inf = container_of(work,
struct lock_server_info,
recovery_dwork.work);
struct super_block *sb = inf->sb;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
bool timed_out;
u64 rid;
int ret;
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
/* time out any clients whose lock recovery is still pending */
for (rid = 0; ; rid++) {
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
if (ret == -ENOENT) {
ret = 0;
break;
}
if (ret == 0)
ret = get_rid_and_put_ref(&iref, &rid);
if (ret < 0)
break;
spin_lock(&inf->lock);
if (scoutfs_spbm_test(&inf->recovery_pending, rid)) {
scoutfs_spbm_clear(&inf->recovery_pending, rid);
timed_out = true;
} else {
timed_out = false;
}
spin_unlock(&inf->lock);
if (!timed_out)
continue;
scoutfs_err(sb, "client rid %016llx lock recovery timed out",
rid);
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
&super->lock_clients, &key);
if (ret)
break;
}
ret = scoutfs_server_apply_commit(sb, ret);
out:
/* force processing all pending lock requests */
if (ret == 0)
ret = finished_recovery(sb, 0, false);
if (ret < 0) {
scoutfs_err(sb, "lock server saw err %d while timing out clients, shutting down", ret);
scoutfs_server_abort(sb);
}
}
/*
* A client is leaving the lock service. They aren't using locks and
* won't send any more requests. We tear down all the state we had for
* them. This can be called multiple times for a given client as their
* farewell is resent to new servers. It's OK to not find any state.
* If we fail to delete a persistent entry then we have to shut down and
* hope that the next server has more luck.
*/
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct client_lock_entry *clent;
struct client_lock_entry *tmp;
struct server_lock_node *snode;
@@ -711,7 +858,20 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
bool freed;
int ret = 0;
mutex_lock(&inf->mutex);
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
&super->lock_clients, &key);
mutex_unlock(&inf->mutex);
if (ret == -ENOENT) {
ret = 0;
goto out;
}
if (ret < 0)
goto out;
scoutfs_key_set_zeros(&key);
while ((snode = get_server_lock(inf, &key, NULL, true))) {
freed = false;
@@ -796,14 +956,23 @@ static void lock_server_tseq_show(struct seq_file *m,
/*
* Setup the lock server. This is called before networking can deliver
* requests.
* requests. If we find existing client records then we enter recovery.
* Lock request processing is deferred until recovery is resolved for
* all the existing clients: either they reconnect and replay their
* locks or we time them out.
*/
int scoutfs_lock_server_setup(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, u64 max_vers)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct lock_server_info *inf;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
unsigned int nr;
u64 rid;
int ret;
inf = kzalloc(sizeof(struct lock_server_info), GFP_KERNEL);
if (!inf)
@@ -811,7 +980,11 @@ int scoutfs_lock_server_setup(struct super_block *sb,
inf->sb = sb;
spin_lock_init(&inf->lock);
mutex_init(&inf->mutex);
inf->locks_root = RB_ROOT;
scoutfs_spbm_init(&inf->recovery_pending);
INIT_DELAYED_WORK(&inf->recovery_dwork,
scoutfs_lock_server_recovery_timeout);
scoutfs_tseq_tree_init(&inf->tseq_tree, lock_server_tseq_show);
inf->alloc = alloc;
inf->wri = wri;
@@ -826,7 +999,36 @@ int scoutfs_lock_server_setup(struct super_block *sb,
sbi->lock_server_info = inf;
return 0;
/* we enter recovery if there are any client records */
nr = 0;
for (rid = 0; ; rid++) {
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
if (ret == -ENOENT)
break;
if (ret == 0)
ret = get_rid_and_put_ref(&iref, &rid);
if (ret < 0)
goto out;
ret = scoutfs_spbm_set(&inf->recovery_pending, rid);
if (ret)
goto out;
nr++;
if (rid == U64_MAX)
break;
}
ret = 0;
if (nr) {
schedule_delayed_work(&inf->recovery_dwork,
msecs_to_jiffies(LOCK_SERVER_RECOVERY_MS));
scoutfs_info(sb, "waiting for %u lock clients to recover", nr);
}
out:
return ret;
}
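/*
 * A minimal sketch of the recovery_pending lifecycle used above; the
 * function is hypothetical, assuming only the spbm.h calls seen in
 * this file.  Setup fills the map with the rid of every previously
 * connected client, responses or timeouts clear entries, and an empty
 * map means recovery is resolved for everyone.
 */
static bool example_recovery_pending_lifecycle(u64 rid)
{
	struct scoutfs_spbm pending;
	bool done;

	scoutfs_spbm_init(&pending);

	/* setup marks a previously connected client as pending */
	if (scoutfs_spbm_set(&pending, rid) < 0) {
		scoutfs_spbm_destroy(&pending);
		return false;
	}

	/* a final recovery response (or a timeout) clears its rid */
	if (scoutfs_spbm_test(&pending, rid))
		scoutfs_spbm_clear(&pending, rid);

	/* an empty map lets queued lock requests proceed */
	done = scoutfs_spbm_empty(&pending);
	scoutfs_spbm_destroy(&pending);
	return done;
}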
/*
@@ -844,6 +1046,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
LIST_HEAD(list);
if (inf) {
cancel_delayed_work_sync(&inf->recovery_dwork);
debugfs_remove(inf->tseq_dentry);
rbtree_postorder_for_each_entry_safe(snode, stmp,
@@ -862,6 +1066,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
kfree(snode);
}
scoutfs_spbm_destroy(&inf->recovery_pending);
kfree(inf);
sbi->lock_server_info = NULL;
}

View File

@@ -3,10 +3,10 @@
int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock_recover *nlr);
int scoutfs_lock_server_finished_recovery(struct super_block *sb);
int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
u64 net_id, struct scoutfs_net_lock *nl);
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid);
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
bool should_exist);
int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid);

View File

@@ -944,6 +944,7 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
struct scoutfs_net_connection *acc_conn;
DECLARE_WAIT_QUEUE_HEAD(waitq);
struct socket *acc_sock;
LIST_HEAD(conn_list);
int ret;
trace_scoutfs_net_listen_work_enter(sb, 0, 0);
@@ -1545,8 +1546,9 @@ void scoutfs_net_client_greeting(struct super_block *sb,
* response and they can disconnect cleanly.
*
* At this point our connection is idle except for send submissions and
* shutdown being queued. We have exclusive access to the previous conn
* once it's shutdown and we set _freeing.
* shutdown being queued. Once we shut down a previous conn we
* completely own it: we have exclusive access to it once it's shut
* down and we set _freeing.
*/
void scoutfs_net_server_greeting(struct super_block *sb,
struct scoutfs_net_connection *conn,

View File

@@ -90,13 +90,19 @@ enum conn_flags {
#define SIN_ARG(sin) sin, be16_to_cpu((sin)->sin_port)
static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
union scoutfs_inet_addr *addr)
struct scoutfs_inet_addr *addr)
{
BUG_ON(addr->v4.family != cpu_to_le16(SCOUTFS_AF_IPV4));
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->addr));
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->port));
}
static inline void scoutfs_addr_from_sin(struct scoutfs_inet_addr *addr,
struct sockaddr_in *sin)
{
addr->addr = be32_to_le32(sin->sin_addr.s_addr);
addr->port = be16_to_le16(sin->sin_port);
memset(addr->__pad, 0, sizeof(addr->__pad));
}
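/*
 * A hypothetical round trip, for illustration only: the persistent
 * scoutfs_inet_addr holds little-endian values while sockaddr_in
 * holds big-endian ones, so converting to a sin and back should be
 * the identity.
 */
static inline bool example_addr_round_trip_ok(struct scoutfs_inet_addr *addr)
{
	struct sockaddr_in sin;
	struct scoutfs_inet_addr copy;

	scoutfs_addr_to_sin(&sin, addr);
	scoutfs_addr_from_sin(&copy, &sin);

	return copy.addr == addr->addr && copy.port == addr->port;
}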
struct scoutfs_net_connection *

File diff suppressed because it is too large

View File

@@ -1,24 +0,0 @@
#ifndef _SCOUTFS_OMAP_H_
#define _SCOUTFS_OMAP_H_
int scoutfs_omap_inc(struct super_block *sb, u64 ino);
void scoutfs_omap_dec(struct super_block *sb, u64 ino);
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
struct scoutfs_lock **lock_ret);
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata);
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map_args *args);
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_finished_recovery(struct super_block *sb);
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_open_ino_map_args *args);
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
struct scoutfs_open_ino_map *resp_map);
void scoutfs_omap_server_shutdown(struct super_block *sb);
int scoutfs_omap_setup(struct super_block *sb);
void scoutfs_omap_destroy(struct super_block *sb);
#endif

View File

@@ -28,7 +28,7 @@
#include "super.h"
static const match_table_t tokens = {
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
{Opt_server_addr, "server_addr=%s"},
{Opt_metadev_path, "metadev_path=%s"},
{Opt_err, NULL}
};
@@ -43,6 +43,46 @@ u32 scoutfs_option_u32(struct super_block *sb, int token)
return 0;
}
/* The caller's string is null terminated and can be clobbered */
static int parse_ipv4(struct super_block *sb, char *str,
struct sockaddr_in *sin)
{
unsigned long port = 0;
__be32 addr;
char *c;
int ret;
/* null term port, if specified */
c = strchr(str, ':');
if (c)
*c = '\0';
/* parse addr */
addr = in_aton(str);
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
ipv4_is_zeronet(addr) ||
ipv4_is_local_multicast(addr)) {
scoutfs_err(sb, "invalid unicast ipv4 address: %s", str);
return -EINVAL;
}
/* parse port, if specified */
if (c) {
c++;
ret = kstrtoul(c, 0, &port);
if (ret != 0 || port == 0 || port >= U16_MAX) {
scoutfs_err(sb, "invalid port in ipv4 address: %s", c);
return -EINVAL;
}
}
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = addr;
sin->sin_port = cpu_to_be16(port);
return 0;
}
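/*
 * For illustration only (example_* is not in the source): parse_ipv4
 * clobbers its input, so callers copy the option string first, as
 * scoutfs_parse_options does with match_strlcpy() below.  A value of
 * "1.2.3.4:12345" fills sin with the be32 address and be16 port; a
 * bare "1.2.3.4" leaves the port at zero.
 */
static int example_parse_server_addr(struct super_block *sb,
				     struct sockaddr_in *sin)
{
	char str[] = "1.2.3.4:12345";	/* hypothetical option value */

	return parse_ipv4(sb, str, sin);
}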
static int parse_bdev_path(struct super_block *sb, substring_t *substr,
char **bdev_path_ret)
{
@@ -92,15 +132,14 @@ out:
int scoutfs_parse_options(struct super_block *sb, char *options,
struct mount_options *parsed)
{
char ipstr[INET_ADDRSTRLEN + 1];
substring_t args[MAX_OPT_ARGS];
int nr;
int token;
char *p;
int ret;
/* Set defaults */
memset(parsed, 0, sizeof(*parsed));
parsed->quorum_slot_nr = -1;
while ((p = strsep(&options, ",")) != NULL) {
if (!*p)
@@ -108,23 +147,12 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
token = match_token(p, tokens, args);
switch (token) {
case Opt_quorum_slot_nr:
case Opt_server_addr:
if (parsed->quorum_slot_nr != -1) {
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
return -EINVAL;
}
ret = match_int(args, &nr);
if (ret < 0 || nr < 0 ||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
SCOUTFS_QUORUM_MAX_SLOTS - 1);
if (ret == 0)
ret = -EINVAL;
match_strlcpy(ipstr, args, ARRAY_SIZE(ipstr));
ret = parse_ipv4(sb, ipstr, &parsed->server_addr);
if (ret < 0)
return ret;
}
parsed->quorum_slot_nr = nr;
break;
case Opt_metadev_path:

View File

@@ -6,13 +6,13 @@
#include "format.h"
enum scoutfs_mount_options {
Opt_quorum_slot_nr,
Opt_server_addr,
Opt_metadev_path,
Opt_err,
};
struct mount_options {
int quorum_slot_nr;
struct sockaddr_in server_addr;
char *metadev_path;
};

File diff suppressed because it is too large

View File

@@ -1,15 +1,10 @@
#ifndef _SCOUTFS_QUORUM_H_
#define _SCOUTFS_QUORUM_H_
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
void scoutfs_quorum_server_shutdown(struct super_block *sb);
u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
struct sockaddr_in *sin);
int scoutfs_quorum_election(struct super_block *sb, ktime_t timeout_abs,
u64 prev_term, u64 *elected_term);
void scoutfs_quorum_clear_leader(struct super_block *sb);
int scoutfs_quorum_setup(struct super_block *sb);
void scoutfs_quorum_shutdown(struct super_block *sb);
void scoutfs_quorum_destroy(struct super_block *sb);
#endif

View File

@@ -1,280 +0,0 @@
/*
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include "super.h"
#include "recov.h"
/*
* There are a few server messages which can't be processed until they
* know that they have state for all possibly active clients. These
* little helpers track which clients have recovered what state and give
* those message handlers a call to check if recovery has completed. We
* track the timeout here, but all we do is call back into the server to
* take steps to evict timed out clients and then let us know that their
* recovery has finished.
*/
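/*
 * A hypothetical caller, for illustration only (the real callers are
 * the server's greeting and recovery paths): prepare every previously
 * connected client, arm the timeout with _begin, then call _finish as
 * each client recovers.  The > 0 "all finished" result is only ever
 * seen once, either from _begin or from the last _finish.
 */
static int example_recovery_lifecycle(struct super_block *sb, u64 *rids, int nr,
				      void (*timed_out)(struct super_block *))
{
	int ret;
	int i;

	for (i = 0; i < nr; i++) {
		ret = scoutfs_recov_prepare(sb, rids[i], SCOUTFS_RECOV_LOCKS);
		if (ret < 0)
			return ret;
	}

	ret = scoutfs_recov_begin(sb, timed_out, 30000 /* ms */);
	if (ret)
		return ret;	/* > 0: everyone already finished */

	/* ...called as each recovery response arrives... */
	for (i = 0; i < nr; i++) {
		ret = scoutfs_recov_finish(sb, rids[i], SCOUTFS_RECOV_LOCKS);
		if (ret)
			break;	/* > 0 exactly once, after the last client */
	}

	return ret;
}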
struct recov_info {
struct super_block *sb;
spinlock_t lock;
struct list_head pending;
struct timer_list timer;
void (*timeout_fn)(struct super_block *);
};
#define DECLARE_RECOV_INFO(sb, name) \
struct recov_info *name = SCOUTFS_SB(sb)->recov_info
struct recov_pending {
struct list_head head;
u64 rid;
int which;
};
static struct recov_pending *find_pending(struct recov_info *recinf, u64 rid, int which)
{
struct recov_pending *pend;
list_for_each_entry(pend, &recinf->pending, head) {
if ((rid == 0 || pend->rid == rid) && (pend->which & which))
return pend;
}
return NULL;
}
/*
* Record that we'll be waiting for a client to recover something.
* _finish will eventually be called for every _prepare, either
* because recovery naturally finished or because it timed out and the
* server evicted the client.
*/
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *alloc;
struct recov_pending *pend;
if (WARN_ON_ONCE(which & SCOUTFS_RECOV_INVALID))
return -EINVAL;
alloc = kmalloc(sizeof(*pend), GFP_NOFS);
if (!alloc)
return -ENOMEM;
spin_lock(&recinf->lock);
pend = find_pending(recinf, rid, SCOUTFS_RECOV_ALL);
if (pend) {
pend->which |= which;
} else {
swap(pend, alloc);
pend->rid = rid;
pend->which = which;
list_add(&pend->head, &recinf->pending);
}
spin_unlock(&recinf->lock);
kfree(alloc);
return 0;
}
/*
* Recovery is only finished once we've begun (which sets the timer) and
* all clients have finished. If we didn't test the timer we could
* claim it finished prematurely as clients are being prepared.
*/
static int recov_finished(struct recov_info *recinf)
{
return !!(recinf->timeout_fn != NULL && list_empty(&recinf->pending));
}
static void timer_callback(struct timer_list *timer)
{
struct recov_info *recinf = from_timer(recinf, timer, timer);
recinf->timeout_fn(recinf->sb);
}
/*
* Begin waiting for recovery once we've prepared all the clients. If
* the timeout period elapses before _finish is called on all prepared
* clients then the timer will call the callback.
*
* Returns > 0 if all the prepared clients finish recovery before begin
* is called.
*/
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
unsigned int timeout_ms)
{
DECLARE_RECOV_INFO(sb, recinf);
int ret;
spin_lock(&recinf->lock);
recinf->timeout_fn = timeout_fn;
recinf->timer.expires = jiffies + msecs_to_jiffies(timeout_ms);
add_timer(&recinf->timer);
ret = recov_finished(recinf);
spin_unlock(&recinf->lock);
if (ret > 0)
del_timer_sync(&recinf->timer);
return ret;
}
/*
* A given client has recovered the given state. If it's finished all
* recovery then we free it, and if all clients have finished recovery
* then we cancel the timeout timer.
*
* Returns > 0 if _begin has been called and all clients have finished.
* The caller will only see > 0 returned once.
*/
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
int ret = 0;
spin_lock(&recinf->lock);
pend = find_pending(recinf, rid, which);
if (pend) {
pend->which &= ~which;
if (pend->which) {
pend = NULL;
} else {
list_del(&pend->head);
ret = recov_finished(recinf);
}
}
spin_unlock(&recinf->lock);
if (ret > 0)
del_timer_sync(&recinf->timer);
kfree(pend);
return ret;
}
/*
* Returns true if the given client is still trying to recover
* the given state.
*/
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
bool is_pending;
spin_lock(&recinf->lock);
is_pending = find_pending(recinf, rid, which) != NULL;
spin_unlock(&recinf->lock);
return is_pending;
}
/*
* Returns 0 if there are no rids waiting for the given state to be
* recovered. Returns the rid of a client still waiting if there are
* any, in no specified order.
*
* This is inherently racy. Callers are responsible for reconciling any
* actions taken based on a pending result with recovery finishing,
* perhaps before we return.
*/
u64 scoutfs_recov_next_pending(struct super_block *sb, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
u64 rid;
spin_lock(&recinf->lock);
pend = find_pending(recinf, 0, which);
rid = pend ? pend->rid : 0;
spin_unlock(&recinf->lock);
return rid;
}
/*
* The server is shutting down and doesn't need to worry about recovery
* anymore. It'll be built up again by the next server, if needed.
*/
void scoutfs_recov_shutdown(struct super_block *sb)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
struct recov_pending *tmp;
LIST_HEAD(list);
del_timer_sync(&recinf->timer);
spin_lock(&recinf->lock);
list_splice_init(&recinf->pending, &list);
recinf->timeout_fn = NULL;
spin_unlock(&recinf->lock);
list_for_each_entry_safe(pend, tmp, &list, head) {
list_del(&pend->head);
kfree(pend);
}
}
int scoutfs_recov_setup(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct recov_info *recinf;
int ret;
recinf = kzalloc(sizeof(struct recov_info), GFP_KERNEL);
if (!recinf) {
ret = -ENOMEM;
goto out;
}
recinf->sb = sb;
spin_lock_init(&recinf->lock);
INIT_LIST_HEAD(&recinf->pending);
timer_setup(&recinf->timer, timer_callback, 0);
sbi->recov_info = recinf;
ret = 0;
out:
return ret;
}
void scoutfs_recov_destroy(struct super_block *sb)
{
DECLARE_RECOV_INFO(sb, recinf);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
if (recinf) {
scoutfs_recov_shutdown(sb);
kfree(recinf);
sbi->recov_info = NULL;
}
}

View File

@@ -1,23 +0,0 @@
#ifndef _SCOUTFS_RECOV_H_
#define _SCOUTFS_RECOV_H_
enum {
SCOUTFS_RECOV_GREETING = ( 1 << 0),
SCOUTFS_RECOV_LOCKS = ( 1 << 1),
SCOUTFS_RECOV_INVALID = (~0 << 2),
SCOUTFS_RECOV_ALL = (~SCOUTFS_RECOV_INVALID),
};
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which);
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
unsigned int timeout_ms);
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which);
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, int which);
void scoutfs_recov_shutdown(struct super_block *sb);
int scoutfs_recov_setup(struct super_block *sb);
void scoutfs_recov_destroy(struct super_block *sb);
#endif

View File

@@ -31,6 +31,7 @@
#include "lock.h"
#include "super.h"
#include "ioctl.h"
#include "count.h"
#include "export.h"
#include "dir.h"
#include "server.h"
@@ -423,34 +424,135 @@ TRACE_EVENT(scoutfs_trans_write_func,
TP_printk(SCSBF" dirty %lu", SCSB_TRACE_ARGS, __entry->dirty)
);
DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
TRACE_EVENT(scoutfs_release_trans,
TP_PROTO(struct super_block *sb, void *rsv, unsigned int rsv_holders,
struct scoutfs_item_count *res,
struct scoutfs_item_count *act, unsigned int tri_holders,
unsigned int tri_writing, unsigned int tri_items,
unsigned int tri_vals),
TP_ARGS(sb, journal_info, holders),
TP_ARGS(sb, rsv, rsv_holders, res, act, tri_holders, tri_writing,
tri_items, tri_vals),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(unsigned long, journal_info)
__field(int, holders)
__field(void *, rsv)
__field(unsigned int, rsv_holders)
__field(int, res_items)
__field(int, res_vals)
__field(int, act_items)
__field(int, act_vals)
__field(unsigned int, tri_holders)
__field(unsigned int, tri_writing)
__field(unsigned int, tri_items)
__field(unsigned int, tri_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->journal_info = (unsigned long)journal_info;
__entry->holders = holders;
__entry->rsv = rsv;
__entry->rsv_holders = rsv_holders;
__entry->res_items = res->items;
__entry->res_vals = res->vals;
__entry->act_items = act->items;
__entry->act_vals = act->vals;
__entry->tri_holders = tri_holders;
__entry->tri_writing = tri_writing;
__entry->tri_items = tri_items;
__entry->tri_vals = tri_vals;
),
TP_printk(SCSBF" journal_info 0x%0lx holders %d",
SCSB_TRACE_ARGS, __entry->journal_info, __entry->holders)
TP_printk(SCSBF" rsv %p holders %u reserved %u.%u actual "
"%d.%d, trans holders %u writing %u reserved "
"%u.%u", SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
__entry->res_items, __entry->res_vals, __entry->act_items,
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
__entry->tri_items, __entry->tri_vals)
);
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_trans_acquired_hold,
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
TP_ARGS(sb, journal_info, holders)
TRACE_EVENT(scoutfs_trans_acquired_hold,
TP_PROTO(struct super_block *sb, const struct scoutfs_item_count *cnt,
void *rsv, unsigned int rsv_holders,
struct scoutfs_item_count *res,
struct scoutfs_item_count *act, unsigned int tri_holders,
unsigned int tri_writing, unsigned int tri_items,
unsigned int tri_vals),
TP_ARGS(sb, cnt, rsv, rsv_holders, res, act, tri_holders, tri_writing,
tri_items, tri_vals),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, cnt_items)
__field(int, cnt_vals)
__field(void *, rsv)
__field(unsigned int, rsv_holders)
__field(int, res_items)
__field(int, res_vals)
__field(int, act_items)
__field(int, act_vals)
__field(unsigned int, tri_holders)
__field(unsigned int, tri_writing)
__field(unsigned int, tri_items)
__field(unsigned int, tri_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->cnt_items = cnt->items;
__entry->cnt_vals = cnt->vals;
__entry->rsv = rsv;
__entry->rsv_holders = rsv_holders;
__entry->res_items = res->items;
__entry->res_vals = res->vals;
__entry->act_items = act->items;
__entry->act_vals = act->vals;
__entry->tri_holders = tri_holders;
__entry->tri_writing = tri_writing;
__entry->tri_items = tri_items;
__entry->tri_vals = tri_vals;
),
TP_printk(SCSBF" cnt %u.%u, rsv %p holders %u reserved %u.%u "
"actual %d.%d, trans holders %u writing %u reserved "
"%u.%u", SCSB_TRACE_ARGS, __entry->cnt_items,
__entry->cnt_vals, __entry->rsv, __entry->rsv_holders,
__entry->res_items, __entry->res_vals, __entry->act_items,
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
__entry->tri_items, __entry->tri_vals)
);
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_release_trans,
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
TP_ARGS(sb, journal_info, holders)
TRACE_EVENT(scoutfs_trans_track_item,
TP_PROTO(struct super_block *sb, int delta_items, int delta_vals,
int act_items, int act_vals, int res_items, int res_vals),
TP_ARGS(sb, delta_items, delta_vals, act_items, act_vals, res_items,
res_vals),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, delta_items)
__field(int, delta_vals)
__field(int, act_items)
__field(int, act_vals)
__field(int, res_items)
__field(int, res_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->delta_items = delta_items;
__entry->delta_vals = delta_vals;
__entry->act_items = act_items;
__entry->act_vals = act_vals;
__entry->res_items = res_items;
__entry->res_vals = res_vals;
),
TP_printk(SCSBF" delta_items %d delta_vals %d act_items %d act_vals %d res_items %d res_vals %d",
SCSB_TRACE_ARGS, __entry->delta_items, __entry->delta_vals,
__entry->act_items, __entry->act_vals, __entry->res_items,
__entry->res_vals)
);
TRACE_EVENT(scoutfs_ioc_release,
@@ -690,16 +792,15 @@ TRACE_EVENT(scoutfs_evict_inode,
TRACE_EVENT(scoutfs_drop_inode,
TP_PROTO(struct super_block *sb, __u64 ino, unsigned int nlink,
unsigned int unhashed, bool drop_invalidated),
unsigned int unhashed),
TP_ARGS(sb, ino, nlink, unhashed, drop_invalidated),
TP_ARGS(sb, ino, nlink, unhashed),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(unsigned int, nlink)
__field(unsigned int, unhashed)
__field(unsigned int, drop_invalidated)
),
TP_fast_assign(
@@ -707,12 +808,10 @@ TRACE_EVENT(scoutfs_drop_inode,
__entry->ino = ino;
__entry->nlink = nlink;
__entry->unhashed = unhashed;
__entry->drop_invalidated = !!drop_invalidated;
),
TP_printk(SCSBF" ino %llu nlink %u unhashed %d drop_invalidated %u", SCSB_TRACE_ARGS,
__entry->ino, __entry->nlink, __entry->unhashed,
__entry->drop_invalidated)
TP_printk(SCSBF" ino %llu nlink %u unhashed %d", SCSB_TRACE_ARGS,
__entry->ino, __entry->nlink, __entry->unhashed)
);
TRACE_EVENT(scoutfs_inode_walk_writeback,
@@ -1587,7 +1686,7 @@ TRACE_EVENT(scoutfs_get_name,
);
TRACE_EVENT(scoutfs_btree_read_error,
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *ref),
TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *ref),
TP_ARGS(sb, ref),
@@ -1607,10 +1706,37 @@ TRACE_EVENT(scoutfs_btree_read_error,
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq)
);
TRACE_EVENT(scoutfs_btree_dirty_block,
TP_PROTO(struct super_block *sb, u64 blkno, u64 seq,
u64 bt_blkno, u64 bt_seq),
TP_ARGS(sb, blkno, seq, bt_blkno, bt_seq),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, blkno)
__field(__u64, seq)
__field(__u64, bt_blkno)
__field(__u64, bt_seq)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->blkno = blkno;
__entry->seq = seq;
__entry->bt_blkno = bt_blkno;
__entry->bt_seq = bt_seq;
),
TP_printk(SCSBF" blkno %llu seq %llu bt_blkno %llu bt_seq %llu",
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq,
__entry->bt_blkno, __entry->bt_seq)
);
TRACE_EVENT(scoutfs_btree_walk,
TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
struct scoutfs_key *key, int flags, int level,
struct scoutfs_block_ref *ref),
struct scoutfs_btree_ref *ref),
TP_ARGS(sb, root, key, flags, level, ref),
@@ -1746,69 +1872,118 @@ TRACE_EVENT(scoutfs_lock_message,
__entry->old_mode, __entry->new_mode)
);
DECLARE_EVENT_CLASS(scoutfs_quorum_message_class,
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
TP_ARGS(sb, term, type, nr),
TRACE_EVENT(scoutfs_quorum_election,
TP_PROTO(struct super_block *sb, u64 prev_term),
TP_ARGS(sb, prev_term),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, term)
__field(__u8, type)
__field(int, nr)
__field(__u64, prev_term)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->term = term;
__entry->type = type;
__entry->nr = nr;
__entry->prev_term = prev_term;
),
TP_printk(SCSBF" term %llu type %u nr %d",
SCSB_TRACE_ARGS, __entry->term, __entry->type, __entry->nr)
);
DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_send_message,
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
TP_ARGS(sb, term, type, nr)
);
DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_recv_message,
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
TP_ARGS(sb, term, type, nr)
TP_printk(SCSBF" prev_term %llu",
SCSB_TRACE_ARGS, __entry->prev_term)
);
TRACE_EVENT(scoutfs_quorum_loop,
TP_PROTO(struct super_block *sb, int role, u64 term, int vote_for,
unsigned long vote_bits, struct timespec64 timeout),
TRACE_EVENT(scoutfs_quorum_election_ret,
TP_PROTO(struct super_block *sb, int ret, u64 elected_term),
TP_ARGS(sb, role, term, vote_for, vote_bits, timeout),
TP_ARGS(sb, ret, elected_term),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, ret)
__field(__u64, elected_term)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ret = ret;
__entry->elected_term = elected_term;
),
TP_printk(SCSBF" ret %d elected_term %llu",
SCSB_TRACE_ARGS, __entry->ret, __entry->elected_term)
);
TRACE_EVENT(scoutfs_quorum_election_vote,
TP_PROTO(struct super_block *sb, int role, u64 term, u64 vote_for_rid,
int votes, int log_cycles, int quorum_count),
TP_ARGS(sb, role, term, vote_for_rid, votes, log_cycles, quorum_count),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, term)
__field(int, role)
__field(int, vote_for)
__field(unsigned long, vote_bits)
__field(unsigned long, vote_count)
__field(unsigned long long, timeout_sec)
__field(int, timeout_nsec)
__field(__u64, term)
__field(__u64, vote_for_rid)
__field(int, votes)
__field(int, log_cycles)
__field(int, quorum_count)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->term = term;
__entry->role = role;
__entry->vote_for = vote_for;
__entry->vote_bits = vote_bits;
__entry->vote_count = hweight_long(vote_bits);
__entry->timeout_sec = timeout.tv_sec;
__entry->timeout_nsec = timeout.tv_nsec;
__entry->term = term;
__entry->vote_for_rid = vote_for_rid;
__entry->votes = votes;
__entry->log_cycles = log_cycles;
__entry->quorum_count = quorum_count;
),
TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu.%u",
SCSB_TRACE_ARGS, __entry->term, __entry->role,
__entry->vote_for, __entry->vote_bits, __entry->vote_count,
__entry->timeout_sec, __entry->timeout_nsec)
TP_printk(SCSBF" role %d term %llu vote_for_rid %016llx votes %d log_cycles %d quorum_count %d",
SCSB_TRACE_ARGS, __entry->role, __entry->term,
__entry->vote_for_rid, __entry->votes, __entry->log_cycles,
__entry->quorum_count)
);
DECLARE_EVENT_CLASS(scoutfs_quorum_block_class,
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
TP_ARGS(sb, blk),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, blkno)
__field(__u64, term)
__field(__u64, write_nr)
__field(__u64, voter_rid)
__field(__u64, vote_for_rid)
__field(__u32, crc)
__field(__u8, log_nr)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->blkno = le64_to_cpu(blk->blkno);
__entry->term = le64_to_cpu(blk->term);
__entry->write_nr = le64_to_cpu(blk->write_nr);
__entry->voter_rid = le64_to_cpu(blk->voter_rid);
__entry->vote_for_rid = le64_to_cpu(blk->vote_for_rid);
__entry->crc = le32_to_cpu(blk->crc);
__entry->log_nr = blk->log_nr;
),
TP_printk(SCSBF" blkno %llu term %llu write_nr %llu voter_rid %016llx vote_for_rid %016llx crc 0x%08x log_nr %u",
SCSB_TRACE_ARGS, __entry->blkno, __entry->term,
__entry->write_nr, __entry->voter_rid, __entry->vote_for_rid,
__entry->crc, __entry->log_nr)
);
DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_read_block,
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
TP_ARGS(sb, blk)
);
DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_write_block,
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
TP_ARGS(sb, blk)
);
/*
@@ -1838,27 +2013,31 @@ DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_recv_clock_sync,
);
TRACE_EVENT(scoutfs_trans_seq_advance,
TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),
TP_PROTO(struct super_block *sb, u64 rid, u64 prev_seq,
u64 next_seq),
TP_ARGS(sb, rid, trans_seq),
TP_ARGS(sb, rid, prev_seq, next_seq),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, s_rid)
__field(__u64, trans_seq)
__field(__u64, prev_seq)
__field(__u64, next_seq)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->s_rid = rid;
__entry->trans_seq = trans_seq;
__entry->prev_seq = prev_seq;
__entry->next_seq = next_seq;
),
TP_printk(SCSBF" rid %016llx trans_seq %llu\n",
SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
TP_printk(SCSBF" rid %016llx prev_seq %llu next_seq %llu",
SCSB_TRACE_ARGS, __entry->s_rid, __entry->prev_seq,
__entry->next_seq)
);
TRACE_EVENT(scoutfs_trans_seq_remove,
TRACE_EVENT(scoutfs_trans_seq_farewell,
TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),
TP_ARGS(sb, rid, trans_seq),
@@ -1938,8 +2117,8 @@ DEFINE_EVENT(scoutfs_forest_bloom_class, scoutfs_forest_bloom_search,
);
TRACE_EVENT(scoutfs_forest_prepare_commit,
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *item_ref,
struct scoutfs_block_ref *bloom_ref),
TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *item_ref,
struct scoutfs_btree_ref *bloom_ref),
TP_ARGS(sb, item_ref, bloom_ref),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
@@ -2005,45 +2184,18 @@ TRACE_EVENT(scoutfs_forest_init_our_log,
__entry->blkno, __entry->seq)
);
TRACE_EVENT(scoutfs_block_dirty_ref,
TP_PROTO(struct super_block *sb, u64 ref_blkno, u64 ref_seq,
u64 block_blkno, u64 block_seq),
TP_ARGS(sb, ref_blkno, ref_seq, block_blkno, block_seq),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ref_blkno)
__field(__u64, ref_seq)
__field(__u64, block_blkno)
__field(__u64, block_seq)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ref_blkno = ref_blkno;
__entry->ref_seq = ref_seq;
__entry->block_blkno = block_blkno;
__entry->block_seq = block_seq;
),
TP_printk(SCSBF" ref_blkno %llu ref_seq %llu block_blkno %llu block_seq %llu",
SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq,
__entry->block_blkno, __entry->block_seq)
);
DECLARE_EVENT_CLASS(scoutfs_block_class,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
unsigned long bits, __u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed),
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(void *, bp)
__field(__u64, blkno)
__field(int, refcount)
__field(int, io_count)
__field(long, bits)
__field(__u64, accessed)
__field(unsigned long, bits)
__field(__u64, lru_moved)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
@@ -2052,71 +2204,57 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
__entry->refcount = refcount;
__entry->io_count = io_count;
__entry->bits = bits;
__entry->accessed = accessed;
__entry->lru_moved = lru_moved;
),
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx accessed %llu",
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno, __entry->refcount,
__entry->io_count, __entry->bits, __entry->accessed)
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx lru_moved %llu",
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno,
__entry->refcount, __entry->io_count, __entry->bits,
__entry->lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_allocate,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_free,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_insert,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_remove,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_end_io,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_submit,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_invalidate,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_mark_dirty,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_forget,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_shrink,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits, u64 lru_moved),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
);
DECLARE_EVENT_CLASS(scoutfs_ext_next_class,
@@ -2358,136 +2496,6 @@ TRACE_EVENT(scoutfs_alloc_move,
__entry->ret)
);
TRACE_EVENT(scoutfs_item_read_page,
TP_PROTO(struct super_block *sb, struct scoutfs_key *key,
struct scoutfs_key *pg_start, struct scoutfs_key *pg_end),
TP_ARGS(sb, key, pg_start, pg_end),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
sk_trace_define(key)
sk_trace_define(pg_start)
sk_trace_define(pg_end)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
sk_trace_assign(key, key);
sk_trace_assign(pg_start, pg_start);
sk_trace_assign(pg_end, pg_end);
),
TP_printk(SCSBF" key "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT,
SCSB_TRACE_ARGS, sk_trace_args(key), sk_trace_args(pg_start),
sk_trace_args(pg_end))
);
TRACE_EVENT(scoutfs_item_invalidate_page,
TP_PROTO(struct super_block *sb, struct scoutfs_key *start,
struct scoutfs_key *end, struct scoutfs_key *pg_start,
struct scoutfs_key *pg_end, int pgi),
TP_ARGS(sb, start, end, pg_start, pg_end, pgi),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
sk_trace_define(start)
sk_trace_define(end)
sk_trace_define(pg_start)
sk_trace_define(pg_end)
__field(int, pgi)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
sk_trace_assign(start, start);
sk_trace_assign(end, end);
sk_trace_assign(pg_start, pg_start);
sk_trace_assign(pg_end, pg_end);
__entry->pgi = pgi;
),
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT" pgi %d",
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
sk_trace_args(pg_start), sk_trace_args(pg_end), __entry->pgi)
);
DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(void *, grp)
__field(__u64, group_nr)
__field(unsigned int, group_total)
__field(int, bit_nr)
__field(int, bit_count)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->grp = grp;
__entry->group_nr = group_nr;
__entry->group_total = group_total;
__entry->bit_nr = bit_nr;
__entry->bit_count = bit_count;
),
TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d bit_count %d",
SCSB_TRACE_ARGS, __entry->grp, __entry->group_nr, __entry->group_total,
__entry->bit_nr, __entry->bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_alloc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_free,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_inc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_dec,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_request,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_destroy,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
TRACE_EVENT(scoutfs_omap_should_delete,
TP_PROTO(struct super_block *sb, u64 ino, unsigned int nlink, int ret),
TP_ARGS(sb, ino, nlink, ret),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(unsigned int, nlink)
__field(int, ret)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->nlink = nlink;
__entry->ret = ret;
),
TP_printk(SCSBF" ino %llu nlink %u ret %d",
SCSB_TRACE_ARGS, __entry->ino, __entry->nlink, __entry->ret)
);
#endif /* _TRACE_SCOUTFS_H */
/* This part must be outside protection */

File diff suppressed because it is too large

View File

@@ -59,21 +59,18 @@ do { \
int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_net_lock *nl);
struct scoutfs_net_lock_grant_response *gr);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
struct scoutfs_key *key);
void scoutfs_server_get_roots(struct super_block *sb,
struct scoutfs_net_roots *roots);
int scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
struct scoutfs_open_ino_map_args *args);
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_open_ino_map *map, int err);
struct sockaddr_in;
struct scoutfs_quorum_elected_info;
int scoutfs_server_start(struct super_block *sb, u64 term);
int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
u64 term);
void scoutfs_server_abort(struct super_block *sb);
void scoutfs_server_stop(struct super_block *sb);

View File

@@ -255,9 +255,24 @@ static u8 height_for_blk(u64 blk)
return hei;
}
static inline u32 srch_level_magic(int level)
static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
int level)
{
return level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT : SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_block_header *hdr;
/* don't leak uninit kernel mem.. block should do this for us? */
memset(bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
hdr = bl->data;
hdr->fsid = super->hdr.fsid;
hdr->blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
if (level)
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_PARENT);
else
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK);
}
/*
@@ -269,15 +284,39 @@ static inline u32 srch_level_magic(int level)
*/
static int read_srch_block(struct super_block *sb,
struct scoutfs_block_writer *wri, int level,
struct scoutfs_block_ref *ref,
struct scoutfs_srch_ref *ref,
struct scoutfs_block **bl_ret)
{
u32 magic = srch_level_magic(level);
int ret;
struct scoutfs_block *bl;
int retries = 0;
int ret = 0;
int mag;
ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
if (ret == -ESTALE)
mag = level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT :
SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
retry:
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
if (!IS_ERR_OR_NULL(bl) &&
!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno, mag)) {
scoutfs_inc_counter(sb, srch_inconsistent_ref);
scoutfs_block_writer_forget(sb, wri, bl);
scoutfs_block_invalidate(sb, bl);
scoutfs_block_put(sb, bl);
bl = NULL;
if (retries++ == 0)
goto retry;
bl = ERR_PTR(-ESTALE);
scoutfs_inc_counter(sb, srch_read_stale);
}
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
bl = NULL;
}
*bl_ret = bl;
return ret;
}
@@ -294,7 +333,7 @@ static int read_path_block(struct super_block *sb,
{
struct scoutfs_block *bl = NULL;
struct scoutfs_srch_parent *srp;
struct scoutfs_block_ref ref;
struct scoutfs_srch_ref ref;
int level;
int ind;
int ret;
@@ -353,10 +392,12 @@ static int get_file_block(struct super_block *sb,
struct scoutfs_block_header *hdr;
struct scoutfs_block *bl = NULL;
struct scoutfs_srch_parent *srp;
struct scoutfs_block_ref new_root_ref;
struct scoutfs_block_ref *ref;
struct scoutfs_block *new_bl;
struct scoutfs_srch_ref *ref;
u64 blkno = 0;
int level;
int ind;
int err;
int ret;
u8 hei;
@@ -368,21 +409,29 @@ static int get_file_block(struct super_block *sb,
goto out;
}
memset(&new_root_ref, 0, sizeof(new_root_ref));
level = sfl->height;
ret = scoutfs_block_dirty_ref(sb, alloc, wri, &new_root_ref,
srch_level_magic(level), &bl, 0, NULL);
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
if (ret < 0)
goto out;
if (level) {
bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
goto out;
}
blkno = 0;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
init_file_block(sb, bl, sfl->height);
if (sfl->height) {
srp = bl->data;
srp->refs[0] = sfl->ref;
srp->refs[0].blkno = sfl->ref.blkno;
srp->refs[0].seq = sfl->ref.seq;
}
hdr = bl->data;
sfl->ref = new_root_ref;
sfl->ref.blkno = hdr->blkno;
sfl->ref.seq = hdr->seq;
sfl->height++;
scoutfs_block_put(sb, bl);
bl = NULL;
@@ -398,13 +447,54 @@ static int get_file_block(struct super_block *sb,
goto out;
}
if (flags & GFB_DIRTY)
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, srch_level_magic(level),
&bl, 0, NULL);
else
ret = scoutfs_block_read_ref(sb, ref, srch_level_magic(level), &bl);
if (ret < 0)
goto out;
/* read an existing block */
if (ref->blkno) {
ret = read_srch_block(sb, wri, level, ref, &bl);
if (ret < 0)
goto out;
}
/* allocate a new block if we need it */
if (!ref->blkno || ((flags & GFB_DIRTY) &&
!scoutfs_block_writer_is_dirty(sb, bl))) {
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
if (ret < 0)
goto out;
new_bl = scoutfs_block_create(sb, blkno);
if (IS_ERR(new_bl)) {
ret = PTR_ERR(new_bl);
goto out;
}
if (bl) {
/* cow old block if we have one */
ret = scoutfs_free_meta(sb, alloc, wri,
bl->blkno);
if (ret)
goto out;
memcpy(new_bl->data, bl->data,
SCOUTFS_BLOCK_LG_SIZE);
scoutfs_block_put(sb, bl);
bl = new_bl;
hdr = bl->data;
hdr->blkno = cpu_to_le64(bl->blkno);
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
} else {
/* init new allocated block */
bl = new_bl;
init_file_block(sb, bl, level);
}
blkno = 0;
scoutfs_block_writer_mark_dirty(sb, wri, bl);
/* update file or parent block ref */
hdr = bl->data;
ref->blkno = hdr->blkno;
ref->seq = hdr->seq;
}
if (level == 0) {
ret = 0;
@@ -424,6 +514,12 @@ static int get_file_block(struct super_block *sb,
out:
scoutfs_block_put(sb, parent);
/* return allocated blkno on error */
if (blkno > 0) {
err = scoutfs_free_meta(sb, alloc, wri, blkno);
BUG_ON(err); /* radix should have been dirty */
}
if (ret < 0) {
scoutfs_block_put(sb, bl);
bl = NULL;
@@ -1102,10 +1198,14 @@ int scoutfs_srch_get_compact(struct super_block *sb,
for (;;scoutfs_key_inc(&key)) {
ret = scoutfs_btree_next(sb, root, &key, &iref);
if (ret == -ENOENT) {
ret = 0;
sc->nr = 0;
goto out;
}
if (ret == 0) {
if (iref.key->sk_type != type) {
ret = -ENOENT;
} else if (iref.val_len == sizeof(sfl)) {
if (iref.val_len == sizeof(struct scoutfs_srch_file)) {
key = *iref.key;
memcpy(&sfl, iref.val, iref.val_len);
} else {
@@ -1113,25 +1213,24 @@ int scoutfs_srch_get_compact(struct super_block *sb,
}
scoutfs_btree_put_iref(&iref);
}
if (ret < 0) {
/* see if we ran out of log files or files entirely */
if (ret == -ENOENT) {
sc->nr = 0;
if (type == SCOUTFS_SRCH_LOG_TYPE) {
type = SCOUTFS_SRCH_BLOCKS_TYPE;
init_srch_key(&key, type, 0, 0);
continue;
} else {
ret = 0;
}
}
if (ret < 0)
goto out;
}
/* skip any files already being compacted */
if (scoutfs_spbm_test(&busy, le64_to_cpu(sfl.ref.blkno)))
continue;
/* see if we ran out of log files or files entirely */
if (key.sk_type != type) {
sc->nr = 0;
if (key.sk_type == SCOUTFS_SRCH_BLOCKS_TYPE) {
type = SCOUTFS_SRCH_BLOCKS_TYPE;
} else {
ret = 0;
goto out;
}
}
/* reset if we iterated into the next size category */
if (type == SCOUTFS_SRCH_BLOCKS_TYPE) {
order = fls64(le64_to_cpu(sfl.blocks)) /
@@ -2156,8 +2255,7 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
if (ret < 0)
goto commit;
ret = scoutfs_alloc_prepare_commit(sb, &alloc, &wri) ?:
scoutfs_block_writer_write(sb, &wri);
ret = scoutfs_block_writer_write(sb, &wri);
commit:
/* the server won't use our partial compact if _ERROR is set */
sc->meta_avail = alloc.avail;

View File

@@ -44,8 +44,6 @@
#include "srch.h"
#include "item.h"
#include "alloc.h"
#include "recov.h"
#include "omap.h"
#include "scoutfs_trace.h"
static struct dentry *scoutfs_debugfs_root;
@@ -168,7 +166,7 @@ out:
* try to free as many locks as possible.
*/
if (scoutfs_trigger(sb, STATFS_LOCK_PURGE))
scoutfs_free_unused_locks(sb);
scoutfs_free_unused_locks(sb, -1UL);
return ret;
}
@@ -178,8 +176,7 @@ static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
struct super_block *sb = root->d_sb;
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
if (opts->quorum_slot_nr >= 0)
seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
seq_printf(seq, ",server_addr="SIN_FMT, SIN_ARG(&opts->server_addr));
seq_printf(seq, ",metadev_path=%s", opts->metadev_path);
return 0;
@@ -195,19 +192,20 @@ static ssize_t metadev_path_show(struct kobject *kobj,
}
SCOUTFS_ATTR_RO(metadev_path);
static ssize_t quorum_server_nr_show(struct kobject *kobj,
static ssize_t server_addr_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
return snprintf(buf, PAGE_SIZE, SIN_FMT"\n",
SIN_ARG(&opts->server_addr));
}
SCOUTFS_ATTR_RO(quorum_server_nr);
SCOUTFS_ATTR_RO(server_addr);
static struct attribute *mount_options_attrs[] = {
SCOUTFS_ATTR_PTR(metadev_path),
SCOUTFS_ATTR_PTR(quorum_server_nr),
SCOUTFS_ATTR_PTR(server_addr),
NULL,
};
@@ -245,26 +243,28 @@ static void scoutfs_put_super(struct super_block *sb)
trace_scoutfs_put_super(sb);
sbi->shutdown = true;
scoutfs_data_destroy(sb);
scoutfs_srch_destroy(sb);
scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
sbi->rid_lock = NULL;
scoutfs_lock_shutdown(sb);
scoutfs_shutdown_trans(sb);
scoutfs_client_destroy(sb);
scoutfs_inode_destroy(sb);
scoutfs_item_destroy(sb);
scoutfs_forest_destroy(sb);
scoutfs_data_destroy(sb);
scoutfs_quorum_destroy(sb);
/* the server locks the listen address and compacts */
scoutfs_lock_shutdown(sb);
scoutfs_server_destroy(sb);
scoutfs_recov_destroy(sb);
scoutfs_net_destroy(sb);
scoutfs_lock_destroy(sb);
scoutfs_omap_destroy(sb);
/* server clears quorum leader flag during shutdown */
scoutfs_quorum_destroy(sb);
scoutfs_block_destroy(sb);
scoutfs_destroy_triggers(sb);
@@ -309,34 +309,6 @@ int scoutfs_write_super(struct super_block *sb,
sizeof(struct scoutfs_super_block));
}
static bool invalid_blkno_limits(struct super_block *sb, char *which,
u64 start, __le64 first, __le64 last,
struct block_device *bdev, int shift)
{
u64 blkno;
if (le64_to_cpu(first) < start) {
scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
which, le64_to_cpu(first), start);
return true;
}
if (le64_to_cpu(first) > le64_to_cpu(last)) {
scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
which, le64_to_cpu(first), which, le64_to_cpu(last));
return true;
}
blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
if (le64_to_cpu(last) > blkno) {
scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
which, le64_to_cpu(last), blkno);
return true;
}
return false;
}
/*
* Read super, specifying bdev.
*/
@@ -344,9 +316,9 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
struct block_device *bdev,
struct scoutfs_super_block *super_res)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super;
__le32 calc;
u64 blkno;
int ret;
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -380,27 +352,58 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
}
if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
le64_to_cpu(super->version),
SCOUTFS_INTEROP_VERSION);
if (super->format_hash != cpu_to_le64(SCOUTFS_FORMAT_HASH)) {
scoutfs_err(sb, "super block has invalid format hash 0x%llx, expected 0x%llx",
le64_to_cpu(super->format_hash),
SCOUTFS_FORMAT_HASH);
ret = -EINVAL;
goto out;
}
/* XXX do we want more rigorous invalid super checking? */
if (invalid_blkno_limits(sb, "meta",
SCOUTFS_META_DEV_START_BLKNO,
super->first_meta_blkno,
super->last_meta_blkno, sbi->meta_bdev,
SCOUTFS_BLOCK_LG_SHIFT) ||
invalid_blkno_limits(sb, "data",
SCOUTFS_DATA_DEV_START_BLKNO,
super->first_data_blkno,
super->last_data_blkno, sb->s_bdev,
SCOUTFS_BLOCK_SM_SHIFT)) {
if (super->quorum_count == 0 ||
super->quorum_count > SCOUTFS_QUORUM_MAX_COUNT) {
scoutfs_err(sb, "super block has invalid quorum count %u, must be > 0 and <= %u",
super->quorum_count, SCOUTFS_QUORUM_MAX_COUNT);
ret = -EINVAL;
goto out;
}
blkno = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
SCOUTFS_BLOCK_SM_LG_SHIFT;
if (le64_to_cpu(super->first_meta_blkno) < blkno) {
scoutfs_err(sb, "super block first meta blkno %llu is within quorum blocks",
le64_to_cpu(super->first_meta_blkno));
ret = -EINVAL;
goto out;
}
if (le64_to_cpu(super->first_meta_blkno) >
le64_to_cpu(super->last_meta_blkno)) {
scoutfs_err(sb, "super block first meta blkno %llu is greater than last meta blkno %llu",
le64_to_cpu(super->first_meta_blkno),
le64_to_cpu(super->last_meta_blkno));
ret = -EINVAL;
goto out;
}
if (le64_to_cpu(super->first_data_blkno) >
le64_to_cpu(super->last_data_blkno)) {
scoutfs_err(sb, "super block first data blkno %llu is greater than last data blkno %llu",
le64_to_cpu(super->first_data_blkno),
le64_to_cpu(super->last_data_blkno));
ret = -EINVAL;
goto out;
}
blkno = (i_size_read(sb->s_bdev->bd_inode) >>
SCOUTFS_BLOCK_SM_SHIFT) - 1;
if (le64_to_cpu(super->last_data_blkno) > blkno) {
scoutfs_err(sb, "super block last data blkno %llu is outsite device size last blkno %llu",
le64_to_cpu(super->last_data_blkno), blkno);
ret = -EINVAL;
goto out;
}
out:
@@ -594,12 +597,10 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
scoutfs_inode_setup(sb) ?:
scoutfs_data_setup(sb) ?:
scoutfs_setup_trans(sb) ?:
scoutfs_omap_setup(sb) ?:
scoutfs_lock_setup(sb) ?:
scoutfs_net_setup(sb) ?:
scoutfs_recov_setup(sb) ?:
scoutfs_server_setup(sb) ?:
scoutfs_quorum_setup(sb) ?:
scoutfs_server_setup(sb) ?:
scoutfs_client_setup(sb) ?:
scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
&sbi->rid_lock) ?:
@@ -648,9 +649,6 @@ static void scoutfs_kill_sb(struct super_block *sb)
{
trace_scoutfs_kill_sb(sb);
if (SCOUTFS_HAS_SBI(sb))
scoutfs_lock_unmount_begin(sb);
kill_block_super(sb);
}
@@ -684,10 +682,6 @@ static int __init scoutfs_module_init(void)
".section .note.git_describe,\"a\"\n"
".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
".previous\n");
__asm__ __volatile__ (
".section .note.scoutfs_interop_version,\"a\"\n"
".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
".previous\n");
scoutfs_init_counters();
@@ -720,4 +714,3 @@ module_exit(scoutfs_module_exit)
MODULE_AUTHOR("Zach Brown <zab@versity.com>");
MODULE_LICENSE("GPL");
MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);

View File

@@ -26,8 +26,6 @@ struct net_info;
struct block_info;
struct forest_info;
struct srch_info;
struct recov_info;
struct omap_info;
struct scoutfs_sb_info {
struct super_block *sb;
@@ -50,7 +48,6 @@ struct scoutfs_sb_info {
struct block_info *block_info;
struct forest_info *forest_info;
struct srch_info *srch_info;
struct omap_info *omap_info;
struct item_cache_info *item_cache_info;
wait_queue_head_t trans_hold_wq;
@@ -73,7 +70,6 @@ struct scoutfs_sb_info {
struct lock_server_info *lock_server_info;
struct client_info *client_info;
struct server_info *server_info;
struct recov_info *recov_info;
struct sysfs_info *sfsinfo;
struct scoutfs_counters *counters;
@@ -85,6 +81,8 @@ struct scoutfs_sb_info {
struct dentry *debug_root;
bool shutdown;
unsigned long corruption_messages_once[SC_NR_LONGS];
};

View File

@@ -39,15 +39,17 @@
* track the relationships between dirty blocks so there's only ever one
* transaction being built.
*
* Committing the current dirty transaction can be triggered by sync, a
* regular background commit interval, reaching a dirty block threshold,
* or the transaction running out of its private allocator resources.
* Once all the current holders release the writing func writes out the
* dirty blocks while excluding holders until it finishes.
* The copy of the on-disk super block in the fs sb info has its header
* sequence advanced so that new dirty blocks inherit this dirty
* sequence number. It's only advanced once all those dirty blocks are
* reachable: they're all written out first, then the new super with
* that seq. It's first incremented at mount.
*
* Unfortunately writing holders can nest. We track nested hold callers
* with the per-task journal_info pointer to avoid deadlocks between
* holders that might otherwise wait for a pending commit.
* Unfortunately writers can nest. We don't bother trying to special
* case holding a transaction that you're already holding because that
* requires per-task storage. We just let anyone hold transactions
* regardless of waiters waiting to write, which risks those waiters
* waiting a very long time.
*/
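
A minimal sketch of that ordering, assuming the super copy lives at SCOUTFS_SB(sb)->super and reusing the writer entry points visible later in this diff; this is an illustration, not the literal write func:

```c
/* sketch: persist dirty blocks, then the super that references them */
static int commit_ordering_sketch(struct super_block *sb,
				  struct trans_info *tri)
{
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	int ret;

	/* 1: write every block dirtied under the current hdr.seq */
	ret = scoutfs_block_writer_write(sb, &tri->wri);
	if (ret < 0)
		return ret;

	/* 2: write the super so those blocks become reachable */
	ret = scoutfs_write_super(sb, super);
	if (ret < 0)
		return ret;

	/* 3: only now advance the seq that new dirty blocks inherit */
	le64_add_cpu(&super->hdr.seq, 1);
	return 0;
}
```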
/* sync dirty data at least this often */
@@ -57,7 +59,11 @@
* XXX move the rest of the super trans_ fields here.
*/
struct trans_info {
atomic_t holders;
spinlock_t lock;
unsigned reserved_items;
unsigned reserved_vals;
unsigned holders;
bool writing;
struct scoutfs_log_trees lt;
struct scoutfs_alloc alloc;
@@ -67,9 +73,17 @@ struct trans_info {
#define DECLARE_TRANS_INFO(sb, name) \
struct trans_info *name = SCOUTFS_SB(sb)->trans_info
/* avoid the high sign bit out of an abundance of caution */
#define TRANS_HOLDERS_WRITE_FUNC_BIT (1 << 30)
#define TRANS_HOLDERS_COUNT_MASK (TRANS_HOLDERS_WRITE_FUNC_BIT - 1)
static bool drained_holders(struct trans_info *tri)
{
bool drained;
spin_lock(&tri->lock);
tri->writing = true;
drained = tri->holders == 0;
spin_unlock(&tri->lock);
return drained;
}
static int commit_btrees(struct super_block *sb)
{
@@ -114,36 +128,6 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
return scoutfs_block_writer_has_dirty(sb, &tri->wri);
}
/*
* This is racing with wait_event conditions, make sure our atomic
* stores and waitqueue loads are ordered.
*/
static void sub_holders_and_wake(struct super_block *sb, int val)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_TRANS_INFO(sb, tri);
atomic_sub(val, &tri->holders);
smp_mb(); /* make sure sub is visible before we wake */
if (waitqueue_active(&sbi->trans_hold_wq))
wake_up(&sbi->trans_hold_wq);
}
/*
* called as a wait_event condition, needs to be careful to not change
* task state and is racing with waking paths that sub_return, test, and
* wake.
*/
static bool drained_holders(struct trans_info *tri)
{
int holders;
smp_mb(); /* make sure task in wait_event queue before atomic read */
holders = atomic_read(&tri->holders) & TRANS_HOLDERS_COUNT_MASK;
return holders == 0;
}
/*
* This work func is responsible for writing out all the dirty blocks
* that make up the current dirty transaction. It prevents writers from
@@ -180,9 +164,6 @@ void scoutfs_trans_write_func(struct work_struct *work)
sbi->trans_task = current;
/* mark that we're writing so holders wait for us to finish and clear our bit */
atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);
wait_event(sbi->trans_hold_wq, drained_holders(tri));
trace_scoutfs_trans_write_func(sb,
@@ -234,8 +215,11 @@ out:
spin_unlock(&sbi->trans_write_lock);
wake_up(&sbi->trans_write_wq);
/* we're done, wake waiting holders */
sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);
spin_lock(&tri->lock);
tri->writing = false;
spin_unlock(&tri->lock);
wake_up(&sbi->trans_hold_wq);
sbi->trans_task = NULL;
@@ -327,83 +311,64 @@ void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
}
/*
* We store nested holders in the lower bits of journal_info. We use
* some higher bits as a magic value to detect if something goes
* horribly wrong and it gets clobbered.
* Each thread reserves space in the segment for its dirty items while
* it holds the transaction. This is calculated before the first
* transaction hold is acquired. It includes all the potential nested
* item manipulation that could happen with the transaction held.
* Including nested holds avoids having to deal with writing out partial
* transactions while a caller still holds the transaction.
*/
#define TRANS_JI_MAGIC 0xd5700000
#define TRANS_JI_MAGIC_MASK 0xfff00000
#define TRANS_JI_COUNT_MASK 0x000fffff
/* returns true if a caller already had a holder counted in journal_info */
static bool inc_journal_info_holders(void)
{
unsigned long holders = (unsigned long)current->journal_info;
WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));
if (holders == 0)
holders = TRANS_JI_MAGIC;
holders++;
current->journal_info = (void *)holders;
return (holders > (TRANS_JI_MAGIC | 1));
}
static void dec_journal_info_holders(void)
{
unsigned long holders = (unsigned long)current->journal_info;
WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));
WARN_ON_ONCE((holders & TRANS_JI_COUNT_MASK) == 0);
holders--;
if (holders == TRANS_JI_MAGIC)
holders = 0;
current->journal_info = (void *)holders;
}
#define SCOUTFS_RESERVATION_MAGIC 0xd57cd13b
struct scoutfs_reservation {
unsigned magic;
unsigned holders;
struct scoutfs_item_count reserved;
struct scoutfs_item_count actual;
};
/*
* This is called as the wait_event condition for holding a transaction.
* Increment the holder count unless the writer is present. We return
* false to wait until the writer finishes and wakes us.
* Try to hold the transaction. If a caller already holds the trans then
* we piggyback on their hold. We wait if the writer is trying to
* write out the transaction. And if our items won't fit then we kick off
* a write.
*
* This can be racing with itself while there's no waiters. We retry
* the cmpxchg instead of returning and waiting.
* This is called as a condition for wait_event. It is very limited in
* the locking (blocking) it can do because the caller has set the task
* state before testing the condition so that it safely races with
* waking after the condition is set. Our checking of the amount of
* dirty metadata blocks and free data blocks is racy, but we don't mind
* the risk of delaying or prematurely forcing commits.
*/
static bool inc_holders_unless_writer(struct trans_info *tri)
static bool acquired_hold(struct super_block *sb,
struct scoutfs_reservation *rsv,
const struct scoutfs_item_count *cnt)
{
int holders;
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_TRANS_INFO(sb, tri);
bool acquired = false;
unsigned items;
unsigned vals;
do {
smp_mb(); /* make sure we read after wait puts task in queue */
holders = atomic_read(&tri->holders);
if (holders & TRANS_HOLDERS_WRITE_FUNC_BIT)
return false;
spin_lock(&tri->lock);
} while (atomic_cmpxchg(&tri->holders, holders, holders + 1) != holders);
trace_scoutfs_trans_acquired_hold(sb, cnt, rsv, rsv->holders,
&rsv->reserved, &rsv->actual,
tri->holders, tri->writing,
tri->reserved_items,
tri->reserved_vals);
return true;
}
/* use a caller's existing reservation */
if (rsv->holders)
goto hold;
/*
* As we drop the last trans holder we try to wake a writing thread that
* was waiting for us to finish.
*/
static void release_holders(struct super_block *sb)
{
dec_journal_info_holders();
sub_holders_and_wake(sb, 1);
}
/* wait until the writing thread is finished */
if (tri->writing)
goto out;
/* see if we can reserve space for our item count */
items = tri->reserved_items + cnt->items;
vals = tri->reserved_vals + cnt->vals;
/*
* The caller has incremented holders so it is blocking commits. We
* make some quick checks to see if we need to trigger and wait for
* another commit before proceeding.
*/
static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
{
/*
* In theory each dirty item page could be straddling two full
* blocks, requiring 4 allocations for each item cache page.
@@ -413,9 +378,11 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
* that it accounts for having to dirty parent blocks and
* whatever dirtying is done during the transaction hold.
*/
if (scoutfs_alloc_meta_low(sb, &tri->alloc, scoutfs_item_dirty_pages(sb) * 2)) {
if (scoutfs_alloc_meta_low(sb, &tri->alloc,
scoutfs_item_dirty_pages(sb) * 2)) {
scoutfs_inc_counter(sb, trans_commit_dirty_meta_full);
return true;
queue_trans_work(sbi);
goto out;
}
/*
@@ -427,74 +394,71 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
*/
if (scoutfs_alloc_meta_low(sb, &tri->alloc, 16)) {
scoutfs_inc_counter(sb, trans_commit_meta_alloc_low);
return true;
queue_trans_work(sbi);
goto out;
}
/* Try to refill data allocator before premature enospc */
if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
return true;
}
return false;
}
static bool acquired_hold(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_TRANS_INFO(sb, tri);
bool acquired;
/* if a caller already has a hold we acquire unconditionally */
if (inc_journal_info_holders()) {
atomic_inc(&tri->holders);
acquired = true;
goto out;
}
/* wait if the writer is blocking holds */
if (!inc_holders_unless_writer(tri)) {
dec_journal_info_holders();
acquired = false;
goto out;
}
/* wait if we're triggering another commit */
if (commit_before_hold(sb, tri)) {
release_holders(sb);
queue_trans_work(sbi);
acquired = false;
goto out;
}
trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
tri->reserved_items = items;
tri->reserved_vals = vals;
rsv->reserved.items = cnt->items;
rsv->reserved.vals = cnt->vals;
hold:
rsv->holders++;
tri->holders++;
acquired = true;
out:
spin_unlock(&tri->lock);
return acquired;
}
/*
* Try to hold the transaction. Holding the transaction prevents it
* from being committed. If a transaction is currently being written
* then we'll block until it's done and our hold can be granted.
*
* If a caller already holds the trans then we unconditionally acquire
* our hold and return to avoid deadlocks with our caller, the writing
* thread, and us. We record nested holds in a call stack with the
* journal_info pointer in the task_struct.
*
* The writing thread marks itself as a global trans_task which
* short-circuits all the hold machinery so it can call code that would
* otherwise try to hold transactions while it is writing.
*/
int scoutfs_hold_trans(struct super_block *sb)
int scoutfs_hold_trans(struct super_block *sb,
const struct scoutfs_item_count cnt)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_reservation *rsv;
int ret;
/*
* Caller shouldn't provide garbage counts, nor counts that
* can't fit in segments by themselves.
*/
if (WARN_ON_ONCE(cnt.items <= 0 || cnt.vals < 0))
return -EINVAL;
if (current == sbi->trans_task)
return 0;
return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
rsv = current->journal_info;
if (rsv == NULL) {
rsv = kzalloc(sizeof(struct scoutfs_reservation), GFP_NOFS);
if (!rsv)
return -ENOMEM;
rsv->magic = SCOUTFS_RESERVATION_MAGIC;
current->journal_info = rsv;
}
BUG_ON(rsv->magic != SCOUTFS_RESERVATION_MAGIC);
ret = wait_event_interruptible(sbi->trans_hold_wq,
acquired_hold(sb, rsv, &cnt));
if (ret && rsv->holders == 0) {
current->journal_info = NULL;
kfree(rsv);
}
return ret;
}
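
For orientation, a minimal sketch of how a caller pairs the hold and release, borrowing the SIC_EXACT() count used by the xattr changes later in this diff; the caller itself is hypothetical:

```c
/* hypothetical caller: reserve 2 items / 0 value bytes, then release */
static int hold_release_sketch(struct super_block *sb)
{
	int ret;

	ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
	if (ret < 0)
		return ret;

	/* ... dirty at most the reserved items while holding ... */

	scoutfs_release_trans(sb);
	return 0;
}
```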
/*
@@ -504,22 +468,86 @@ int scoutfs_hold_trans(struct super_block *sb)
*/
bool scoutfs_trans_held(void)
{
unsigned long holders = (unsigned long)current->journal_info;
struct scoutfs_reservation *rsv = current->journal_info;
return (holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) == TRANS_JI_MAGIC));
return rsv && rsv->magic == SCOUTFS_RESERVATION_MAGIC;
}
void scoutfs_release_trans(struct super_block *sb)
/*
* Record a transaction holder's individual contribution to the dirty
* items in the current transaction. We're making sure that the
* reservation matches the possible item manipulations made while the
* reservation is held.
*
* It is possible and legitimate for an individual contribution to be
* negative if they delete dirty items. The item cache makes sure that
* the total dirty item count doesn't fall below zero.
*/
void scoutfs_trans_track_item(struct super_block *sb, signed items,
signed vals)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_TRANS_INFO(sb, tri);
struct scoutfs_reservation *rsv = current->journal_info;
if (current == sbi->trans_task)
return;
release_holders(sb);
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
rsv->actual.items += items;
rsv->actual.vals += vals;
trace_scoutfs_trans_track_item(sb, items, vals, rsv->actual.items,
rsv->actual.vals, rsv->reserved.items,
rsv->reserved.vals);
WARN_ON_ONCE(rsv->actual.items > rsv->reserved.items);
WARN_ON_ONCE(rsv->actual.vals > rsv->reserved.vals);
}
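
As a hedged illustration of the sign convention described above (these call sites are assumptions, not lines from this diff):

```c
/* a new item with a val_len byte value was created under the hold */
scoutfs_trans_track_item(sb, 1, val_len);

/* the same dirty item was deleted again before the commit */
scoutfs_trans_track_item(sb, -1, -(signed)val_len);
```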
/*
* As we drop the last hold in the reservation we try to wake other
* hold attempts that were waiting for space. As we drop the last trans
* holder we try to wake a writing thread that was waiting for us to
* finish.
*/
void scoutfs_release_trans(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_reservation *rsv;
DECLARE_TRANS_INFO(sb, tri);
bool wake = false;
if (current == sbi->trans_task)
return;
rsv = current->journal_info;
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
spin_lock(&tri->lock);
trace_scoutfs_release_trans(sb, rsv, rsv->holders, &rsv->reserved,
&rsv->actual, tri->holders, tri->writing,
tri->reserved_items, tri->reserved_vals);
BUG_ON(rsv->holders <= 0);
BUG_ON(tri->holders <= 0);
if (--rsv->holders == 0) {
tri->reserved_items -= rsv->reserved.items;
tri->reserved_vals -= rsv->reserved.vals;
current->journal_info = NULL;
kfree(rsv);
wake = true;
}
if (--tri->holders == 0)
wake = true;
spin_unlock(&tri->lock);
if (wake)
wake_up(&sbi->trans_hold_wq);
}
/*
@@ -548,7 +576,7 @@ int scoutfs_setup_trans(struct super_block *sb)
if (!tri)
return -ENOMEM;
atomic_set(&tri->holders, 0);
spin_lock_init(&tri->lock);
scoutfs_block_writer_init(sb, &tri->wri);
sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
@@ -564,15 +592,8 @@ int scoutfs_setup_trans(struct super_block *sb)
}
/*
* While the vfs will have done an fs level sync before calling
* put_super, we may have done work down in our level after all the fs
* ops were done. An example is final inode deletion in iput, which is
* done in generic_shutdown_super after the sync and before calling our
* put_super.
*
* So we always try to write any remaining dirty transactions before
* shutting down. Typically there won't be any dirty data and the
* worker will just return.
* kill_sb calls sync before getting here so we know that dirty data
* should be in flight. We just have to wait for it to quiesce.
*/
void scoutfs_shutdown_trans(struct super_block *sb)
{
@@ -580,18 +601,13 @@ void scoutfs_shutdown_trans(struct super_block *sb)
DECLARE_TRANS_INFO(sb, tri);
if (tri) {
scoutfs_block_writer_forget_all(sb, &tri->wri);
if (sbi->trans_write_workq) {
/* immediately queues pending timer */
flush_delayed_work(&sbi->trans_write_work);
/* prevents re-arming if it has to wait */
cancel_delayed_work_sync(&sbi->trans_write_work);
destroy_workqueue(sbi->trans_write_workq);
/* trans work schedules after shutdown see null */
sbi->trans_write_workq = NULL;
}
scoutfs_block_writer_forget_all(sb, &tri->wri);
kfree(tri);
sbi->trans_info = NULL;
}

View File

@@ -6,16 +6,21 @@
/* the client will force commits if data allocators get too low */
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)
#include "count.h"
void scoutfs_trans_write_func(struct work_struct *work);
int scoutfs_trans_sync(struct super_block *sb, int wait);
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);
int scoutfs_hold_trans(struct super_block *sb);
int scoutfs_hold_trans(struct super_block *sb,
const struct scoutfs_item_count cnt);
bool scoutfs_trans_held(void);
void scoutfs_release_trans(struct super_block *sb);
u64 scoutfs_trans_sample_seq(struct super_block *sb);
void scoutfs_trans_track_item(struct super_block *sb, signed items,
signed vals);
int scoutfs_trans_get_log_trees(struct super_block *sb);
bool scoutfs_trans_has_dirty(struct super_block *sb);

View File

@@ -38,7 +38,10 @@ struct scoutfs_triggers {
struct scoutfs_triggers *name = SCOUTFS_SB(sb)->triggers
static char *names[] = {
[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
[SCOUTFS_TRIGGER_BTREE_STALE_READ] = "btree_stale_read",
[SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF] = "btree_advance_ring_half",
[SCOUTFS_TRIGGER_HARD_STALE_ERROR] = "hard_stale_error",
[SCOUTFS_TRIGGER_SEG_STALE_READ] = "seg_stale_read",
[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
};

View File

@@ -2,7 +2,10 @@
#define _SCOUTFS_TRIGGERS_H_
enum scoutfs_trigger {
SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
SCOUTFS_TRIGGER_BTREE_STALE_READ,
SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF,
SCOUTFS_TRIGGER_HARD_STALE_ERROR,
SCOUTFS_TRIGGER_SEG_STALE_READ,
SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
SCOUTFS_TRIGGER_NR,
};

View File

@@ -577,7 +577,10 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_XATTR_SET(found_parts,
value != NULL,
name_len, size));
if (ret > 0)
goto retry;
if (ret)
@@ -778,7 +781,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
&tgs) != 0)
memset(&tgs, 0, sizeof(tgs));
ret = scoutfs_hold_trans(sb);
ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
if (ret < 0)
break;
release = true;

View File

@@ -1,4 +1,4 @@
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -I ../kmod/src
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing
SHELL := /usr/bin/bash
# each binary command is built from a single .c file
@@ -6,7 +6,6 @@ BIN := src/createmany \
src/dumb_setxattr \
src/handle_cat \
src/bulk_create_paths \
src/stage_tmpfile \
src/find_xattrs
DEPS := $(wildcard src/*.d)

View File

@@ -3,7 +3,7 @@
t_filter_fs()
{
sed -e 's@mnt/test\.[0-9]*@mnt/test@g' \
-e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
-e 's@Device: [a-fA-F0-7]*h/[0-9]*d@Device: 0h/0d@g'
}
#
@@ -52,15 +52,12 @@ t_filter_dmesg()
# tests that drop unmount io triggers fencing
re="$re|scoutfs .* error: fencing "
re="$re|scoutfs .*: waiting for .* clients"
re="$re|scoutfs .*: all clients recovered"
re="$re|scoutfs .*: waiting for .* lock clients"
re="$re|scoutfs .*: all lock clients recovered"
re="$re|scoutfs .* error: client rid.*lock recovery timed out"
# some tests mount w/o options
re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
# in debugging kernels we can slow things down a bit
re="$re|hrtimer: interrupt took .*"
egrep -v "($re)"
}

View File

@@ -99,19 +99,6 @@ t_first_client_nr()
t_fail "t_first_client_nr didn't find any clients"
}
#
# The number of quorum members needed to form a majority to start the
# server.
#
t_majority_count()
{
if [ "$T_QUORUM" -lt 3 ]; then
echo 1
else
echo $(((T_QUORUM / 2) + 1))
fi
}
t_mount()
{
local nr="$1"
@@ -129,7 +116,7 @@ t_umount()
test "$nr" -lt "$T_NR_MOUNTS" || \
t_fail "fs nr $nr invalid"
eval t_quiet umount \$T_M$nr
eval t_quiet umount \$T_DB$nr
}
#
@@ -209,19 +196,12 @@ t_trigger_show() {
echo "trigger $which $string: $(t_trigger_get $which $nr)"
}
t_trigger_arm_silent() {
t_trigger_arm() {
local which="$1"
local nr="$2"
local path=$(t_trigger_path "$nr")
echo 1 > "$path/$which"
}
t_trigger_arm() {
local which="$1"
local nr="$2"
t_trigger_arm_silent $which $nr
t_trigger_show $which armed $nr
}
@@ -236,44 +216,16 @@ t_counter() {
cat "$(t_sysfs_path $nr)/counters/$which"
}
#
# output the difference between the current value of a counter and the
# caller's provided previous value.
#
t_counter_diff_value() {
local which="$1"
local old="$2"
local nr="$3"
local new="$(t_counter $which $nr)"
echo "$((new - old))"
}
#
# output the value of the given counter for the given mount, defaulting
# to mount 0 if a mount isn't specified. For tests which expect a
# specific difference in counters.
# to mount 0 if a mount isn't specified.
#
t_counter_diff() {
local which="$1"
local old="$2"
local nr="$3"
local new
echo "counter $which diff $(t_counter_diff_value $which $old $nr)"
}
#
# output a message indicating whether or not the counter value changed.
# For tests that expect a difference, or not, but the amount of
# difference isn't significant.
#
t_counter_diff_changed() {
local which="$1"
local old="$2"
local nr="$3"
local diff="$(t_counter_diff_value $which $old $nr)"
test "$diff" -eq 0 && \
echo "counter $which didn't change" ||
echo "counter $which changed"
new="$(t_counter $which $nr)"
echo "counter $which diff $((new - old))"
}
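
A short usage sketch of the counter helpers, mirroring how the stale-btree-read test below uses them (mount 0 assumed):

```shell
old=$(t_counter btree_stale_read 0)
# ... exercise the read path under test ...
t_counter_diff btree_stale_read $old 0  # prints "counter btree_stale_read diff N"
```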

View File

@@ -23,18 +23,3 @@ t_require_mounts() {
test "$T_NR_MOUNTS" -ge "$req" || \
t_skip "$req mounts required, only have $T_NR_MOUNTS"
}
#
# Require that the meta device be at least the size string argument, as
# parsed by numfmt using single char base 2 suffixes (iec): 64G, etc.
#
t_require_meta_size() {
local dev="$T_META_DEVICE"
local req_iec="$1"
local req_bytes=$(numfmt --from=iec --to=none $req_iec)
local dev_bytes=$(blockdev --getsize64 $dev)
local dev_iec=$(numfmt --from=auto --to=iec $dev_bytes)
test "$dev_bytes" -ge "$req_bytes" || \
t_skip "$dev must be at least $req_iec, is $dev_iec"
}

View File

@@ -1,52 +0,0 @@
== create shared test file
== set and get xattrs between mount pairs while retrying
# file: /mnt/test/test/block-stale-reads/file
user.xat="1"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="2"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="3"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="4"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="5"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="6"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="7"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="8"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="9"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="10"
counter block_cache_remove_stale changed
counter block_cache_remove_stale changed

View File

@@ -1,27 +0,0 @@
== basic unlink deletes
ino found in dseq index
ino not found in dseq index
== local open-unlink waits for close to delete
contents after rm: contents
ino found in dseq index
ino not found in dseq index
== multiple local opens are protected
contents after rm 1: contents
contents after rm 2: contents
ino found in dseq index
ino not found in dseq index
== remote unopened unlink deletes
ino not found in dseq index
ino not found in dseq index
== unlink wait for open on other mount
mount 0 contents after mount 1 rm: contents
ino found in dseq index
ino found in dseq index
stat: cannot stat /mnt/test/test/inode-deletion/file: No such file or directory
ino not found in dseq index
ino not found in dseq index
== lots of deletions use one open map
== open files survive remote scanning orphans
mount 0 contents after mount 1 remounted: contents
ino not found in dseq index
ino not found in dseq index

View File

@@ -1,3 +0,0 @@
== create per mount files
== 30s of racing random mount/umount
== mounting any unmounted

View File

@@ -1,18 +0,0 @@
total file size 33669120
00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 |AAAAAAAAAAAAAAAA|
*
00400000 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 |BBBBBBBBBBBBBBBB|
*
00801000 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 |CCCCCCCCCCCCCCCC|
*
00c03000 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 |DDDDDDDDDDDDDDDD|
*
01006000 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 |EEEEEEEEEEEEEEEE|
*
0140a000 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 |FFFFFFFFFFFFFFFF|
*
0180f000 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 |GGGGGGGGGGGGGGGG|
*
01c15000 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 |HHHHHHHHHHHHHHHH|
*
0201c000

View File

@@ -0,0 +1,11 @@
== create file for xattr ping pong
# file: /mnt/test/test/stale-btree-read/file
user.xat="initial"
== retry btree block read
trigger btree_stale_read armed: 1
# file: /mnt/test/test/stale-btree-read/file
user.xat="btree"
trigger btree_stale_read after: 0
counter btree_stale_read diff 1

View File

@@ -1,7 +1,6 @@
Ran:
generic/001
generic/002
generic/004
generic/005
generic/006
generic/007
@@ -74,6 +73,7 @@ generic/376
generic/377
Not
run:
generic/004
generic/008
generic/009
generic/012
@@ -278,4 +278,4 @@ shared/004
shared/032
shared/051
shared/289
Passed all 73 tests
Passed all 72 tests

View File

@@ -52,17 +52,16 @@ $(basename $0) options:
| the file system to be tested. Will be clobbered by -m mkfs.
-m | Run mkfs on the device before mounting and running
| tests. Implies unmounting existing mounts first.
-n <nr> | The number of devices and mounts to test.
-P | Enable trace_printk.
-n | The number of devices and mounts to test.
-P | Output trace events with printk as they're generated.
-p | Exit script after preparing mounts only, don't run tests.
-q <nr> | The first <nr> mounts will be quorum members. Must be
| at least 1 and no greater than -n number of mounts.
-q <nr> | Specify the quorum count needed to mount. This is
| used when running mkfs and is needed by a few tests.
-r <dir> | Specify the directory in which to store results of
| test runs. The directory will be created if it doesn't
| exist. Previous results will be deleted as each test runs.
-s | Skip git repo checkouts.
-t | Enable trace events that match the given glob argument.
| Multiple options enable multiple globbed events.
-X | xfstests git repo. Used by tests/xfstests.sh.
-x | xfstests git branch to checkout and track.
-y | xfstests ./check additional args
@@ -78,9 +77,6 @@ done
T_TRACE_DUMP="0"
T_TRACE_PRINTK="0"
# array declarations to be able to use array ops
declare -a T_TRACE_GLOB
while true; do
case $1 in
-a)
@@ -151,7 +147,7 @@ while true; do
;;
-t)
test -n "$2" || die "-t must have trace glob argument"
T_TRACE_GLOB+=("$2")
T_TRACE_GLOB="$2"
shift
;;
-X)
@@ -199,6 +195,7 @@ test -e "$T_EX_META_DEV" || die "extra meta device -f '$T_EX_META_DEV' doesn't e
test -n "$T_EX_DATA_DEV" || die "must specify -e extra data device"
test -e "$T_EX_DATA_DEV" || die "extra data device -e '$T_EX_DATA_DEV' doesn't exist"
test -n "$T_MKFS" -a -z "$T_QUORUM" && die "mkfs (-m) requires quorum (-q)"
test -n "$T_RESULTS" || die "must specify -r results dir"
test -n "$T_XFSTESTS_REPO" -a -z "$T_XFSTESTS_BRANCH" -a -z "$T_SKIP_CHECKOUT" && \
die "-X xfstests repo requires -x xfstests branch"
@@ -208,12 +205,6 @@ test -n "$T_XFSTESTS_BRANCH" -a -z "$T_XFSTESTS_REPO" -a -z "$T_SKIP_CHECKOUT" &
test -n "$T_NR_MOUNTS" || die "must specify -n nr mounts"
test "$T_NR_MOUNTS" -ge 1 -a "$T_NR_MOUNTS" -le 8 || \
die "-n nr mounts must be >= 1 and <= 8"
test -n "$T_QUORUM" || \
die "must specify -q number of mounts that are quorum members"
test "$T_QUORUM" -ge "1" || \
die "-q quorum mmembers must be at least 1"
test "$T_QUORUM" -le "$T_NR_MOUNTS" || \
die "-q quorum mmembers must not be greater than -n mounts"
# top level paths
T_KMOD=$(realpath "$(dirname $0)/../kmod")
@@ -312,14 +303,8 @@ if [ -n "$T_UNMOUNT" ]; then
unmount_all
fi
quo=""
if [ -n "$T_MKFS" ]; then
for i in $(seq -0 $((T_QUORUM - 1))); do
quo="$quo -Q $i,127.0.0.1,$((42000 + i))"
done
msg "making new filesystem with $T_QUORUM quorum members"
cmd scoutfs mkfs -f $quo "$T_META_DEVICE" "$T_DATA_DEVICE"
cmd scoutfs mkfs -Q "$T_QUORUM" "$T_META_DEVICE" "$T_DATA_DEVICE" -f
fi
if [ -n "$T_INSMOD" ]; then
@@ -329,36 +314,22 @@ if [ -n "$T_INSMOD" ]; then
cmd insmod "$T_KMOD/src/scoutfs.ko"
fi
nr_globs=${#T_TRACE_GLOB[@]}
if [ $nr_globs -gt 0 ]; then
if [ -n "$T_TRACE_GLOB" ]; then
msg "enabling trace events"
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
for g in "${T_TRACE_GLOB[@]}"; do
for g in $T_TRACE_GLOB; do
for e in /sys/kernel/debug/tracing/events/scoutfs/$g/enable; do
if test -w "$e"; then
echo 1 > "$e"
else
die "-t glob '$g' matched no scoutfs events"
fi
echo 1 > $e
done
done
nr_events=$(cat /sys/kernel/debug/tracing/set_event | wc -l)
msg "enabled $nr_events trace events from $nr_globs -t globs"
fi
if [ -n "$T_TRACE_PRINTK" ]; then
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
fi
if [ -n "$T_TRACE_DUMP" ]; then
echo "$T_TRACE_DUMP" > /proc/sys/kernel/ftrace_dump_on_oops
fi
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
# always describe tracing in the logs
cmd cat /sys/kernel/debug/tracing/set_event
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
/proc/sys/kernel/ftrace_dump_on_oops
cmd cat /sys/kernel/debug/tracing/set_event
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
/proc/sys/kernel/ftrace_dump_on_oops
fi
#
# mount concurrently so that a quorum is present to elect the leader and
@@ -376,12 +347,8 @@ for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
dir="/mnt/test.$i"
test -d "$dir" || cmd mkdir -p "$dir"
opts="-o metadev_path=$meta_dev"
if [ "$i" -lt "$T_QUORUM" ]; then
opts="$opts,quorum_slot_nr=$i"
fi
msg "mounting $meta_dev|$data_dev on $dir"
opts="-o server_addr=127.0.0.1,metadev_path=$meta_dev"
cmd mount -t scoutfs $opts "$data_dev" "$dir" &
p="$!"
@@ -467,7 +434,7 @@ for t in $tests; do
# get stats from previous pass
last="$T_RESULTS/last-passed-test-stats"
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
stats=$(grep -s "^$test_name" "$last" | cut -d " " -f 2-)
test -n "$stats" && stats="last: $stats"
printf " %-30s $stats" "$test_name"
@@ -530,7 +497,7 @@ for t in $tests; do
echo " passed: $stats"
((passed++))
# save stats for passed test
grep -s -v "^$test_name " "$last" > "$last.tmp"
grep -s -v "^$test_name" "$last" > "$last.tmp"
echo "$test_name $stats" >> "$last.tmp"
mv -f "$last.tmp" "$last"
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
@@ -548,24 +515,24 @@ done
msg "all tests run: $passed passed, $skipped skipped, $failed failed"
unmount_all
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
if [ -n "$T_TRACE_GLOB" ]; then
msg "saving traces and disabling tracing"
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
echo 0 > /sys/kernel/debug/tracing/options/trace_printk
cat /sys/kernel/debug/tracing/trace > "$T_RESULTS/traces"
fi
if [ "$skipped" == 0 -a "$failed" == 0 ]; then
status=1
if [ "$failed" == 0 ]; then
msg "all tests passed"
unmount_all
exit 0
status=0
fi
if [ "$skipped" != 0 ]; then
msg "$skipped tests skipped, check skip.log, still mounted"
msg "$skipped tests skipped, check skip.log"
fi
if [ "$failed" != 0 ]; then
msg "$failed tests failed, check fail.log, still mounted"
msg "$failed tests failed, check fail.log"
fi
exit 1
exit $status

View File

@@ -13,13 +13,11 @@ lock-refleak.sh
lock-shrink-consistency.sh
lock-pr-cw-conflict.sh
lock-revoke-getcwd.sh
export-lookup-evict-race.sh
createmany-parallel.sh
createmany-large-names.sh
createmany-rename-large-dir.sh
stage-release-race-alloc.sh
stage-multi-part.sh
stage-tmpfile.sh
basic-posix-consistency.sh
dirent-consistency.sh
lock-ex-race-processes.sh
@@ -30,6 +28,5 @@ setup-error-teardown.sh
mount-unmount-race.sh
createmany-parallel-mounts.sh
archive-light-cycle.sh
block-stale-reads.sh
inode-deletion.sh
stale-btree-read.sh
xfstests.sh

View File

@@ -1,145 +0,0 @@
/*
* Exercise O_TMPFILE creation as well as staging from tmpfiles into
* a released destination file.
*
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <errno.h>
#include <linux/types.h>
#include <assert.h>
#include "ioctl.h"
#define array_size(arr) (sizeof(arr) / sizeof(arr[0]))
/*
* Write known data into 8 tmpfiles.
* Make a new file X and release it
* Move contents of 8 tmpfiles into X.
*/
struct sub_tmp_info {
int fd;
unsigned int offset;
unsigned int length;
};
#define SZ 4096
char buf[SZ];
int main(int argc, char **argv)
{
struct scoutfs_ioctl_release ioctl_args = {0};
struct scoutfs_ioctl_move_blocks mb;
struct sub_tmp_info sub_tmps[8];
int tot_size = 0;
char *dest_file;
int dest_fd;
char *mnt;
int ret;
int i;
if (argc < 3) {
printf("%s <mountpoint> <dest_file>\n", argv[0]);
return 1;
}
mnt = argv[1];
dest_file = argv[2];
for (i = 0; i < array_size(sub_tmps); i++) {
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
int remaining;
sub_tmp->fd = open(mnt, O_RDWR | O_TMPFILE, S_IRUSR | S_IWUSR);
if (sub_tmp->fd < 0) {
perror("error");
exit(1);
}
sub_tmp->offset = tot_size;
/* First tmp file is 4MB */
/* Each is 4k bigger than last */
sub_tmp->length = (i + 1024) * sizeof(buf);
remaining = sub_tmp->length;
/* Each sub tmpfile written with 'A', 'B', etc. */
memset(buf, 'A' + i, sizeof(buf));
while (remaining) {
int written;
written = write(sub_tmp->fd, buf, sizeof(buf));
assert(written == sizeof(buf));
tot_size += sizeof(buf);
remaining -= written;
}
}
printf("total file size %d\n", tot_size);
dest_fd = open(dest_file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (dest_fd == -1) {
perror("error");
exit(1);
}
// make dest file big
ret = posix_fallocate(dest_fd, 0, tot_size);
if (ret) {
perror("error");
exit(1);
}
// release everything in dest file
ioctl_args.offset = 0;
ioctl_args.length = tot_size;
ioctl_args.data_version = 0;
ret = ioctl(dest_fd, SCOUTFS_IOC_RELEASE, &ioctl_args);
if (ret < 0) {
perror("error");
exit(1);
}
// move contents into dest in reverse order
for (i = array_size(sub_tmps) - 1; i >= 0 ; i--) {
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
mb.from_fd = sub_tmp->fd;
mb.from_off = 0;
mb.len = sub_tmp->length;
mb.to_off = sub_tmp->offset;
mb.data_version = 0;
mb.flags = SCOUTFS_IOC_MB_STAGE;
ret = ioctl(dest_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
if (ret < 0) {
perror("error");
exit(1);
}
}
return 0;
}

View File

@@ -160,8 +160,8 @@ for i in $(seq 1 1); do
mkdir -p $(dirname $lnk)
ln "$T_D0/file" $lnk
scoutfs ino-path -p "$T_M0" $ino > "$T_TMP.0"
scoutfs ino-path -p "$T_M1" $ino > "$T_TMP.1"
scoutfs ino-path $ino "$T_M0" > "$T_TMP.0"
scoutfs ino-path $ino "$T_M1" > "$T_TMP.1"
diff -u "$T_TMP.0" "$T_TMP.1"
done
done

View File

@@ -1,61 +0,0 @@
#
# Exercise stale block reading.
#
# It would be very difficult to manipulate the allocators, cache, and
# persistent blocks to create stable block reading scenarios. Instead
# we use triggers to exercise how readers encounter stale blocks.
#
t_require_commands touch setfattr getfattr
inc_wrap_fs_nr()
{
local nr="$(($1 + 1))"
if [ "$nr" == "$T_NR_MOUNTS" ]; then
nr=0
fi
echo $nr
}
GETFATTR="getfattr --absolute-names"
SETFATTR="setfattr"
echo "== create shared test file"
touch "$T_D0/file"
$SETFATTR -n user.xat -v 0 "$T_D0/file"
#
# Trigger retries in the block cache as we bounce xattr values around
# between sequential pairs of mounts. This is a little silly because if
# either of the mounts are the server then they'll almost certainly have
# their trigger fired prematurely by message handling btree calls while
# working with the t_ helpers long before we work with the xattrs. But
# the block cache stale retry path is still being exercised.
#
echo "== set and get xattrs between mount pairs while retrying"
set_nr=0
get_nr=$(inc_wrap_fs_nr $set_nr)
for i in $(seq 1 10); do
eval set_file="\$T_D${set_nr}/file"
eval get_file="\$T_D${get_nr}/file"
old_set=$(t_counter block_cache_remove_stale $set_nr)
old_get=$(t_counter block_cache_remove_stale $get_nr)
t_trigger_arm_silent block_remove_stale $set_nr
t_trigger_arm_silent block_remove_stale $get_nr
$SETFATTR -n user.xat -v $i "$set_file"
$GETFATTR -n user.xat "$get_file" 2>&1 | t_filter_fs
t_counter_diff_changed block_cache_remove_stale $old_set $set_nr
t_counter_diff_changed block_cache_remove_stale $old_get $get_nr
set_nr="$get_nr"
get_nr=$(inc_wrap_fs_nr $set_nr)
done
t_pass

View File

@@ -1,32 +0,0 @@
#
# test racing fh_to_dentry with evict from lock invalidation. We've
# had deadlocks between the ordering of iget and evict when they acquire
# cluster locks.
#
t_require_commands touch stat handle_cat
t_require_mounts 2
CPUS=$(getconf _NPROCESSORS_ONLN)
NR=$((CPUS * 4))
END=$((SECONDS + 30))
touch "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
while test $SECONDS -lt $END; do
for i in $(seq 1 $NR); do
fs=$((RANDOM % T_NR_MOUNTS))
eval dir="\$T_D${fs}"
write=$((RANDOM & 1))
if [ "$write" == 1 ]; then
touch "$dir/file" &
else
handle_cat "$dir" "$ino" &
fi
done
wait
done
t_pass

View File

@@ -1,98 +0,0 @@
#
# test deleting an inode once all its links and references are gone.
#
t_require_commands cat scoutfs
t_require_mounts 2
FILE="$T_D0/file"
check_ino_index() {
local ino="$1"
local dseq="$2"
local mnt="$3"
t_sync_seq_index
scoutfs walk-inodes -p "$mnt" -- data_seq $dseq $(($dseq + 1)) |
awk 'BEGIN { not = "not " }
($4 == '$ino') { not = ""; exit; }
END { print "ino " not "found in dseq index" }'
}
echo "== basic unlink deletes"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
check_ino_index "$ino" "$dseq" "$T_M0"
rm -f "$FILE"
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== local open-unlink waits for close to delete"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
exec {FD}<"$FILE" # open unused fd, assign to FD
rm -f "$FILE"
echo "contents after rm: $(cat <&$FD)"
check_ino_index "$ino" "$dseq" "$T_M0"
exec {FD}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== multiple local opens are protected"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
exec {FD1}<"$FILE"
exec {FD2}<"$FILE"
rm -f "$FILE"
echo "contents after rm 1: $(cat <&$FD1)"
echo "contents after rm 2: $(cat <&$FD2)"
check_ino_index "$ino" "$dseq" "$T_M0"
exec {FD1}>&- # close
exec {FD2}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== remote unopened unlink deletes"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
rm -f "$T_D1/file"
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
echo "== unlink wait for open on other mount"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
exec {FD}<"$T_D0/file"
rm -f "$T_D1/file"
echo "mount 0 contents after mount 1 rm: $(cat <&$FD)"
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
exec {FD}>&- # close
# we know that revalidating will unhash the remote dentry
stat "$T_D0/file" 2>&1 | t_filter_fs
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
echo "== lots of deletions use one open map"
mkdir "$T_D0/dir"
touch "$T_D0/dir"/files-{1..5}
rm -f "$T_D0/dir"/files-*
rmdir "$T_D0/dir"
echo "== open files survive remote scanning orphans"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
exec {FD}<"$T_D0/file"
rm -f "$T_D0/file"
t_umount 1
t_mount 1
echo "mount 0 contents after mount 1 remounted: $(cat <&$FD)"
exec {FD}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
t_pass

View File

@@ -50,7 +50,7 @@ for m in 0 1; do
done
wait
CONF="$((SECONDS - START))"
echo "conf: $CONF" >> $T_TMP.log
echo "conf: $IND" >> $T_TMP.log
if [ "$CONF" -gt "$((IND * 5))" ]; then
t_fail "conflicting $CONF secs is more than 5x independent $IND secs"

View File

@@ -4,23 +4,25 @@
# At the start of the test all mounts are mounted. Each iteration
# randomly decides to change each mount or to leave it alone.
#
# Each iteration creates dirty items across the mounts randomly, giving
# unmount some work to do.
# They create dirty items before unmounting to encourage compaction
# while unmounting.
#
# For this test to be meaningful it needs multiple mounts beyond the
# quorum majority which can be racing to mount and unmount. A
# reasonable config would be 5 mounts with 3 quorum members. But the
# test will run with whatever count it finds.
# quorum set which can be racing to mount and unmount. A reasonable
# config would be 5 mounts with a quorum count of 3. But the test will
# run with whatever count it finds.
#
# The test assumes that the first mounts are the quorum members.
# This assumes that all the mounts are configured as voting servers. We
# could update it to be more clever and know that it can always safely
# unmount mounts that aren't configured as servers.
#
majority_nr=$(t_majority_count)
quorum_nr=$T_QUORUM
# nothing to do if we can't unmount
test "$T_NR_MOUNTS" == "$T_QUORUM" && \
t_skip "only quorum members mounted, can't unmount"
cur_quorum=$quorum_nr
test "$cur_quorum" == "$majority_nr" && \
t_skip "all quorum members make up majority, need more mounts to unmount"
nr_mounted=$T_NR_MOUNTS
nr_quorum=$T_QUORUM
echo "== create per mount files"
for i in $(t_fs_nrs); do
@@ -53,42 +55,25 @@ while [ "$SECONDS" -lt "$END" ]; do
fi
if [ "${mounted[$i]}" == 1 ]; then
#
# can always unmount non-quorum mounts,
# can only unmount quorum members beyond majority
#
if [ "$i" -ge "$quorum_nr" -o \
"$cur_quorum" -gt "$majority_nr" ]; then
if [ "$nr_mounted" -gt "$nr_quorum" ]; then
t_umount $i &
pid=$!
echo "umount $i pid $pid quo $cur_quorum" \
>> $T_TMP.log
pids="$pids $pid"
mounted[$i]=0
if [ "$i" -lt "$quorum_nr" ]; then
(( cur_quorum-- ))
fi
(( nr_mounted-- ))
fi
else
t_mount $i &
pid=$!
pids="$pids $pid"
echo "mount $i pid $pid quo $cur_quorum" >> $T_TMP.log
mounted[$i]=1
if [ "$i" -lt "$quorum_nr" ]; then
(( cur_quorum++ ))
fi
(( nr_mounted++ ))
fi
done
echo "waiting (secs $SECONDS)" >> $T_TMP.log
for p in $pids; do
wait $p
rc=$?
if [ "$rc" != 0 ]; then
echo "waiting for pid $p returned $rc"
t_fail "background mount/umount returned error"
fi
t_quiet wait $p
done
echo "done waiting (secs $SECONDS))" >> $T_TMP.log
done

View File

@@ -1,15 +0,0 @@
#
# Run tmpfile_stage and check the output with hexdump.
#
t_require_commands stage_tmpfile hexdump
DEST_FILE="$T_D0/dest_file"
stage_tmpfile $T_D0 $DEST_FILE
hexdump -C "$DEST_FILE"
rm -fr "$DEST_FILE"
t_pass

View File

@@ -0,0 +1,40 @@
#
# verify stale btree block reading
#
t_require_commands touch stat setfattr getfattr createmany
t_require_mounts 2
GETFATTR="getfattr --absolute-names"
SETFATTR="setfattr"
#
# This exercises the soft retry of btree blocks when
# inconsistent cached versions are found. It ensures that basic hard
# error returning turns into EIO in the case where the persistent reread
# blocks and segments really are inconsistent.
#
# The triggers apply across all execution in the file system. So to
# trigger btree block retries in the client we make sure that the server
# is running on the other node.
#
cl=$(t_first_client_nr)
sv=$(t_server_nr)
eval cl_dir="\$T_D${cl}"
eval sv_dir="\$T_D${sv}"
echo "== create file for xattr ping pong"
touch "$sv_dir/file"
$SETFATTR -n user.xat -v initial "$sv_dir/file"
$GETFATTR -n user.xat "$sv_dir/file" 2>&1 | t_filter_fs
echo "== retry btree block read"
$SETFATTR -n user.xat -v btree "$sv_dir/file"
t_trigger_arm btree_stale_read $cl
old=$(t_counter btree_stale_read $cl)
$GETFATTR -n user.xat "$cl_dir/file" 2>&1 | t_filter_fs
t_trigger_show btree_stale_read "after" $cl
t_counter_diff btree_stale_read $old $cl
t_pass

View File

@@ -19,10 +19,10 @@
# make sure we have our config
if [ -z "$T_XFSTESTS_REPO" ]; then
t_fail "xfstests requires -X repo"
t_skip "xfstests requires -X repo"
fi
if [ -z "$T_XFSTESTS_BRANCH" -a -z "$T_SKIP_CHECKOUT" ]; then
t_fail "xfstests requires -x branch"
t_skip "xfstests requires -x branch"
fi
t_quiet mkdir -p "$T_TMPDIR/mnt.scratch"
@@ -37,25 +37,17 @@ t_quiet make
t_quiet sync
# pwd stays in xfstests dir to build config and run
#
# Each filesystem needs specific mkfs and mount options because we put
# quorum member addresses in mkfs options and the metadata device in
# mount options.
#
cat << EOF > local.config
export FSTYP=scoutfs
export MKFS_OPTIONS="-f"
export MKFS_TEST_OPTIONS="-Q 0,127.0.0.1,42000"
export MKFS_SCRATCH_OPTIONS="-Q 0,127.0.0.1,43000"
export MKFS_DEV_OPTIONS="-Q 0,127.0.0.1,44000"
export MKFS_OPTIONS="-Q 1"
export TEST_DEV=$T_DB0
export TEST_DIR=$T_M0
export SCRATCH_META_DEV=$T_EX_META_DEV
export SCRATCH_DEV=$T_EX_DATA_DEV
export SCRATCH_MNT="$T_TMPDIR/mnt.scratch"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
EOF
cat << EOF > local.exclude
@@ -91,7 +83,7 @@ generic/375 # utils output change? update branch?
EOF
t_restore_output
echo " (showing output of xfstests)"
echo "(showing output of xfstests)"
args="-E local.exclude ${T_XFSTESTS_ARGS:--g quick}"
./check $args

View File

@@ -1,11 +1,23 @@
#
# The userspace utils and kernel module share definitions of physical
# structures and ioctls. If we're in the repo we include the kmod
# headers directly, and hash them to calculate the format hash.
#
# If we're creating a standalone tarball for distribution we copy the
# headers out of the kmod dir into the tarball. And then when we're
# building in that tarball we use the headers in src/ directly.
#
FMTIOC_H := format.h ioctl.h
FMTIOC_DIST := $(addprefix src/,$(FMTIOC_H))
FMTIOC_KMOD := $(addprefix ../kmod/src/,$(FMTIOC_H))
ifneq ($(wildcard $(firstword $(FMTIOC_KMOD))),)
HASH_FILES := $(FMTIOC_KMOD)
else
HASH_FILES := $(FMTIOC_DIST)
endif
SCOUTFS_FORMAT_HASH := $(shell cat $(HASH_FILES) | md5sum | cut -b1-16)
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -g -msse4.2 \
-fno-strict-aliasing \
-DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU
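The hash can be reproduced by hand to check whether a built utility
and kernel module came from the same headers. A minimal sketch of the
same computation the Makefile performs, run from the utils directory
of a repo checkout (paths as in FMTIOC_KMOD above):

```shell
cat ../kmod/src/format.h ../kmod/src/ioctl.h | md5sum | cut -b1-16
```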

View File

@@ -21,19 +21,21 @@ contains the filesystem's metadata.
.sp
This option is required.
.TP
.B quorum_slot_nr=<number>
The quorum_slot_nr option assigns a quorum member slot to the mount.
The mount will use the slot assignment to claim exclusive ownership of
the slot's configured address and an associated metadata device block.
Each slot number must be used by only one mount at any given time.
.B server_addr=<ipv4:port>
The server_addr option indicates that this mount will participate in
quorum election to try to run a server for all the mounts of its
filesystem. The option specifies the local TCP IPv4 address on which
the mount's server, if elected, will listen for connections from all
other mounts of the filesystem.
.sp
When a mount is assigned a quorum slot it becomes a quorum member and
will participate in the raft leader election process and could start
the server for the filesystem if it is elected leader.
The IPv4 address must be specified as a dotted quad; name resolution is
not supported. A specific port may be provided after a separating
colon. If no port is specified then a random port will be chosen. The
address will be used for the lifetime of the mount and cannot be
changed. The mount must be unmounted to specify a different address.
.sp
The assigned number must match one of the slots defined with \-Q options
when the filesystem was created with mkfs. If the number assigned
doesn't match a number created during mkfs then the mount will fail.
If server_addr is not specified then the mount will read the filesystem
until it sees the address of an elected server to connect to.
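To illustrate the syntax described above, a hypothetical quorum-member
mount that pins the server port rather than letting a random one be
chosen (the address and device paths are placeholders):

```shell
mount -t scoutfs -o server_addr=10.0.0.1:12345,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```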
.SH FURTHER READING
A
.B scoutfs

View File

@@ -32,7 +32,7 @@ A path within a ScoutFS filesystem.
.PD
.TP
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-count} NUM [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.sp
Initialize a new ScoutFS filesystem on the target devices. Since ScoutFS uses
separate block devices for its metadata and data storage, two are required.
@@ -57,20 +57,18 @@ a faster block device for the metadata device.
The path to the block device to be used for ScoutFS file data. If possible, use
a larger block device for the data device.
.TP
.B "-Q, --quorum-slot NR,ADDR,PORT"
Each \-Q option configures a quorum slot. The NR specifies the number
of the slot to configure which must be between 0 and 14. Each slot
number must only be used once, but they can be used in any order and
they need not be consecutive. This is to allow natural relationships
between slot numbers and nodes which may have arbitrary numbering
schemes. ADDR and PORT are the numerical IPv4 address and port which
will be used as the UDP endpoint for leader elections and as the TCP
listening address for server connections. The number of configured
slots determines the size of the quorum of member mounts which must be
present to start the server for the filesystem to operate. A simple
majority is typically required, while one mount is sufficient if only
one or two slots are configured. Until the majority quorum are present,
all mounts will hang waiting for a server to connect to.
.B "-Q, --quorum-count NUM"
The number of mounts needed to reach quorum and elect one of them to
run the server. Mounts of the filesystem will hang until a quorum of
participating mounts is operational.
.sp
Mounts with the
.B server_addr
mount option participate in quorum. The safest quorum count is the
smallest majority of an odd number of participating mounts: for
example, two out of three total mounts. This ensures that only one set
of mounts can establish quorum at a time.
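For example, a deployment where five mounts supply the server_addr
mount option would use the smallest majority of five (devices and
count are illustrative):

```shell
scoutfs mkfs -Q 3 /dev/meta_dev /dev/data_dev
```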
.TP
.B "-m, --max-meta-size SIZE"
Limit the space used by ScoutFS on the metadata device to the

View File

@@ -25,13 +25,17 @@ static void init_block(struct scoutfs_btree_block *bt, int level)
*/
void btree_init_root_single(struct scoutfs_btree_root *root,
struct scoutfs_btree_block *bt,
u64 seq, u64 blkno)
u64 blkno, u64 seq, __le64 fsid)
{
root->ref.blkno = cpu_to_le64(blkno);
root->ref.seq = cpu_to_le64(seq);
root->ref.seq = cpu_to_le64(1);
root->height = 1;
memset(bt, 0, SCOUTFS_BLOCK_LG_SIZE);
bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
bt->hdr.fsid = fsid;
bt->hdr.blkno = cpu_to_le64(blkno);
bt->hdr.seq = cpu_to_le64(1);
init_block(bt, 0);
}

View File

@@ -3,7 +3,7 @@
void btree_init_root_single(struct scoutfs_btree_root *root,
struct scoutfs_btree_block *bt,
u64 seq, u64 blkno);
u64 blkno, u64 seq, __le64 fsid);
void btree_append_item(struct scoutfs_btree_block *bt,
struct scoutfs_key *key, void *val, int val_len);

View File

@@ -32,22 +32,12 @@
#include "leaf_item_hash.h"
#include "blkid.h"
/*
* Update the block header fields and write out the block.
*/
static int write_block(int fd, u32 magic, __le64 fsid, u64 seq, u64 blkno,
int shift, struct scoutfs_block_header *hdr)
static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
{
size_t size = 1ULL << shift;
ssize_t ret;
hdr->magic = cpu_to_le32(magic);
hdr->fsid = fsid;
hdr->blkno = cpu_to_le64(blkno);
hdr->seq = cpu_to_le64(seq);
hdr->crc = cpu_to_le32(crc_block(hdr, size));
ret = pwrite(fd, hdr, size, blkno << shift);
ret = pwrite(fd, blk, size, blkno << shift);
if (ret != size) {
fprintf(stderr, "write to blkno %llu returned %zd: %s (%d)\n",
blkno, ret, strerror(errno), errno);
@@ -57,18 +47,35 @@ static int write_block(int fd, u32 magic, __le64 fsid, u64 seq, u64 blkno,
return 0;
}
/*
* Update the block's header and write it out.
*/
static int write_block(int fd, u64 blkno, int shift,
struct scoutfs_super_block *super,
struct scoutfs_block_header *hdr)
{
size_t size = 1ULL << shift;
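/* when a super is given, seed the header from it (magic, fsid, seq) */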
if (super)
*hdr = super->hdr;
hdr->blkno = cpu_to_le64(blkno);
hdr->crc = cpu_to_le32(crc_block(hdr, size));
return write_raw_block(fd, blkno, shift, hdr);
}
/*
* Write the single btree block that contains the blkno- and len-indexed
* items storing the given extent, and update the root to point to it.
*/
static int write_alloc_root(int fd, __le64 fsid,
static int write_alloc_root(struct scoutfs_super_block *super, int fd,
struct scoutfs_alloc_root *root,
struct scoutfs_btree_block *bt,
u64 seq, u64 blkno, u64 start, u64 len)
u64 blkno, u64 start, u64 len)
{
struct scoutfs_key key;
btree_init_root_single(&root->root, bt, seq, blkno);
btree_init_root_single(&root->root, bt, blkno, 1, super->hdr.fsid);
root->total_len = cpu_to_le64(len);
memset(&key, 0, sizeof(key));
@@ -87,18 +94,19 @@ static int write_alloc_root(int fd, __le64 fsid,
key.skfl_blkno = cpu_to_le64(start);
btree_append_item(bt, &key, NULL, 0);
return write_block(fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, seq, blkno,
SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
SCOUTFS_BLOCK_LG_SIZE));
return write_raw_block(fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
}
struct mkfs_args {
unsigned long long quorum_count;
char *meta_device;
char *data_device;
unsigned long long max_meta_size;
unsigned long long max_data_size;
bool force;
int nr_slots;
struct scoutfs_quorum_slot slots[SCOUTFS_QUORUM_MAX_SLOTS];
};
/*
@@ -116,14 +124,12 @@ static int do_mkfs(struct mkfs_args *args)
struct scoutfs_inode inode;
struct scoutfs_alloc_list_block *lblk;
struct scoutfs_btree_block *bt = NULL;
struct scoutfs_block_header *hdr;
struct scoutfs_key key;
struct timeval tv;
int meta_fd = -1;
int data_fd = -1;
char uuid_str[37];
void *zeros = NULL;
char *indent;
u64 blkno;
u64 meta_size;
u64 data_size;
@@ -133,12 +139,10 @@ static int do_mkfs(struct mkfs_args *args)
u64 last_data;
u64 meta_start;
u64 meta_len;
__le64 fsid;
int ret;
int i;
gettimeofday(&tv, NULL);
pseudo_random_bytes(&fsid, sizeof(fsid));
meta_fd = open(args->meta_device, O_RDWR | O_EXCL);
if (meta_fd < 0) {
@@ -187,7 +191,10 @@ static int do_mkfs(struct mkfs_args *args)
if (ret)
goto out;
next_meta = SCOUTFS_META_DEV_START_BLKNO;
/* metadata blocks start after the quorum blocks */
next_meta = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
SCOUTFS_BLOCK_SM_LG_SHIFT;
/* rest of meta dev is available for metadata blocks */
last_meta = (meta_size >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
/* Data blocks go on the data dev */
first_data = SCOUTFS_DATA_DEV_START_BLKNO;
@@ -195,7 +202,10 @@ static int do_mkfs(struct mkfs_args *args)
/* partially initialize the super so we can use it to init others */
memset(super, 0, SCOUTFS_BLOCK_SM_SIZE);
super->version = cpu_to_le64(SCOUTFS_INTEROP_VERSION);
pseudo_random_bytes(&super->hdr.fsid, sizeof(super->hdr.fsid));
super->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SUPER);
super->hdr.seq = cpu_to_le64(1);
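/* compile-time hash of the shared format/ioctl headers, from the Makefile */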
super->format_hash = cpu_to_le64(SCOUTFS_FORMAT_HASH);
uuid_generate(super->uuid);
super->next_ino = cpu_to_le64(SCOUTFS_ROOT_INO + 1);
super->next_trans_seq = cpu_to_le64(1);
@@ -205,14 +215,11 @@ static int do_mkfs(struct mkfs_args *args)
super->total_data_blocks = cpu_to_le64(last_data - first_data + 1);
super->first_data_blkno = cpu_to_le64(first_data);
super->last_data_blkno = cpu_to_le64(last_data);
assert(sizeof(args->slots) ==
member_sizeof(struct scoutfs_super_block, qconf.slots));
memcpy(super->qconf.slots, args->slots, sizeof(args->slots));
super->quorum_count = args->quorum_count;
/* fs root starts with root inode and its index items */
blkno = next_meta++;
btree_init_root_single(&super->fs_root, bt, 1, blkno);
btree_init_root_single(&super->fs_root, bt, blkno, 1, super->hdr.fsid);
memset(&key, 0, sizeof(key));
key.sk_zone = SCOUTFS_INODE_INDEX_ZONE;
@@ -237,8 +244,10 @@ static int do_mkfs(struct mkfs_args *args)
inode.mtime.nsec = inode.atime.nsec;
btree_append_item(bt, &key, &inode, sizeof(inode));
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, 1, blkno,
SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
SCOUTFS_BLOCK_LG_SIZE));
ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
if (ret)
goto out;
@@ -247,6 +256,11 @@ static int do_mkfs(struct mkfs_args *args)
lblk = (void *)bt;
memset(lblk, 0, SCOUTFS_BLOCK_LG_SIZE);
lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
lblk->hdr.fsid = super->hdr.fsid;
lblk->hdr.blkno = cpu_to_le64(blkno);
lblk->hdr.seq = cpu_to_le64(1);
meta_len = (64 * 1024 * 1024) >> SCOUTFS_BLOCK_LG_SHIFT;
for (i = 0; i < meta_len; i++) {
lblk->blknos[i] = cpu_to_le64(next_meta);
@@ -254,20 +268,20 @@ static int do_mkfs(struct mkfs_args *args)
}
lblk->nr = cpu_to_le32(i);
super->server_meta_avail[0].ref.blkno = cpu_to_le64(blkno);
super->server_meta_avail[0].ref.seq = cpu_to_le64(1);
super->server_meta_avail[0].ref.blkno = lblk->hdr.blkno;
super->server_meta_avail[0].ref.seq = lblk->hdr.seq;
super->server_meta_avail[0].total_nr = le32_to_le64(lblk->nr);
super->server_meta_avail[0].first_nr = lblk->nr;
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, fsid, 1,
blkno, SCOUTFS_BLOCK_LG_SHIFT, &lblk->hdr);
lblk->hdr.crc = cpu_to_le32(crc_block(&bt->hdr, SCOUTFS_BLOCK_LG_SIZE));
ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, lblk);
if (ret)
goto out;
/* the data allocator has a single extent */
blkno = next_meta++;
ret = write_alloc_root(meta_fd, fsid, &super->data_alloc, bt,
1, blkno, first_data,
ret = write_alloc_root(super, meta_fd, &super->data_alloc, bt,
blkno, first_data,
le64_to_cpu(super->total_data_blocks));
if (ret < 0)
goto out;
@@ -284,8 +298,8 @@ static int do_mkfs(struct mkfs_args *args)
/* each meta alloc root contains a portion of free metadata extents */
for (i = 0; i < array_size(super->meta_alloc); i++) {
blkno = next_meta++;
ret = write_alloc_root(meta_fd, fsid, &super->meta_alloc[i], bt,
1, blkno, meta_start,
ret = write_alloc_root(super, meta_fd, &super->meta_alloc[i], bt,
blkno, meta_start,
min(meta_len,
last_meta - meta_start + 1));
if (ret < 0)
@@ -295,11 +309,9 @@ static int do_mkfs(struct mkfs_args *args)
}
/* zero out quorum blocks */
hdr = zeros;
for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_QUORUM, fsid,
1, SCOUTFS_QUORUM_BLKNO + i,
SCOUTFS_BLOCK_SM_SHIFT, hdr);
ret = write_raw_block(meta_fd, SCOUTFS_QUORUM_BLKNO + i,
SCOUTFS_BLOCK_SM_SHIFT, zeros);
if (ret < 0) {
fprintf(stderr, "error zeroing quorum block: %s (%d)\n",
strerror(-errno), -errno);
@@ -308,9 +320,9 @@ static int do_mkfs(struct mkfs_args *args)
}
/* write the super block to data dev and meta dev */
ret = write_block(data_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid, 1,
SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
&super->hdr);
super->hdr.seq = cpu_to_le64(1);
ret = write_block(data_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
NULL, &super->hdr);
if (ret)
goto out;
@@ -322,9 +334,8 @@ static int do_mkfs(struct mkfs_args *args)
}
super->flags |= cpu_to_le64(SCOUTFS_FLAG_IS_META_BDEV);
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid,
1, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
&super->hdr);
ret = write_block(meta_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
NULL, &super->hdr);
if (ret)
goto out;
@@ -341,35 +352,21 @@ static int do_mkfs(struct mkfs_args *args)
" meta device path: %s\n"
" data device path: %s\n"
" fsid: %llx\n"
" version: %llx\n"
" format hash: %llx\n"
" uuid: %s\n"
" 64KB metadata blocks: "SIZE_FMT"\n"
" 4KB data blocks: "SIZE_FMT"\n"
" quorum slots: ",
" quorum count: %u\n",
args->meta_device,
args->data_device,
le64_to_cpu(super->hdr.fsid),
le64_to_cpu(super->version),
le64_to_cpu(super->format_hash),
uuid_str,
SIZE_ARGS(le64_to_cpu(super->total_meta_blocks),
SCOUTFS_BLOCK_LG_SIZE),
SIZE_ARGS(le64_to_cpu(super->total_data_blocks),
SCOUTFS_BLOCK_SM_SIZE));
indent = "";
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
struct scoutfs_quorum_slot *sl = &super->qconf.slots[i];
struct in_addr in;
if (sl->addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
continue;
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
printf("%s%u: %s:%u", indent,
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
indent = "\n ";
}
printf("\n");
SCOUTFS_BLOCK_SM_SIZE),
super->quorum_count);
ret = 0;
out:
@@ -386,61 +383,16 @@ out:
return ret;
}
static bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
{
struct in_addr in;
bool valid = true;
char *addr;
int i;
int j;
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_NONE))
continue;
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4)) {
fprintf(stderr, "quorum slot nr %u has invalid family %u\n",
i, le16_to_cpu(slots[i].addr.v4.family));
valid = false;
}
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
continue;
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
slots[i].addr.v4.port == slots[j].addr.v4.port) {
in.s_addr =
htonl(le32_to_cpu(slots[i].addr.v4.addr));
addr = inet_ntoa(in);
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
i, j, addr,
le16_to_cpu(slots[i].addr.v4.port));
valid = false;
}
}
}
return valid;
}
static int parse_opt(int key, char *arg, struct argp_state *state)
{
struct mkfs_args *args = state->input;
struct scoutfs_quorum_slot slot;
int ret;
switch (key) {
case 'Q':
ret = parse_quorum_slot(&slot, arg);
if (ret < 0)
ret = parse_u64(arg, &args->quorum_count);
if (ret)
return ret;
if (args->slots[ret].addr.v4.family != cpu_to_le16(SCOUTFS_AF_NONE))
argp_error(state, "Quorum slot %u already specified before slot '%s'\n",
ret, arg);
args->slots[ret] = slot;
args->nr_slots++;
break;
case 'f':
args->force = true;
@@ -480,14 +432,12 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
argp_error(state, "more than two arguments given");
break;
case ARGP_KEY_FINI:
if (!args->nr_slots)
argp_error(state, "must specify at least one quorum slot with --quorum-count|-Q");
if (!args->quorum_count)
argp_error(state, "must provide nonzero quorum count with --quorum-count|-Q option");
if (!args->meta_device)
argp_error(state, "no metadata device argument given");
if (!args->data_device)
argp_error(state, "no data device argument given");
if (!valid_quorum_slots(args->slots))
argp_error(state, "invalid quorum slot configuration");
break;
default:
break;
@@ -497,7 +447,7 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
}
static struct argp_option options[] = {
{ "quorum-slot", 'Q', "NR,ADDR,PORT", 0, "Specify quorum slot addresses [Required]"},
{ "quorum-count", 'Q', "NUM", 0, "Number of voters required to use the filesystem [Required]"},
{ "force", 'f', NULL, 0, "Overwrite existing data on block devices"},
{ "max-meta-size", 'm', "SIZE", 0, "Use a size less than the base metadata device size (bytes or KMGTP units)"},
{ "max-data-size", 'd', "SIZE", 0, "Use a size less than the base data device size (bytes or KMGTP units)"},
@@ -513,7 +463,7 @@ static struct argp argp = {
static int mkfs_cmd(int argc, char *argv[])
{
struct mkfs_args mkfs_args = {NULL,};
struct mkfs_args mkfs_args = {0};
int ret;
ret = argp_parse(&argp, argc, argv, 0, NULL, &mkfs_args);

View File

@@ -32,7 +32,7 @@ struct move_blocks_args {
static int do_move_blocks(struct move_blocks_args *args)
{
struct scoutfs_ioctl_move_blocks mb = {0};
struct scoutfs_ioctl_move_blocks mb;
int from_fd = -1;
int to_fd = -1;
int ret;

View File

@@ -3,9 +3,6 @@
#include <stdlib.h>
#include <limits.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "sparse.h"
#include "util.h"
@@ -155,66 +152,3 @@ int parse_timespec(char *str, struct timespec *ts)
return 0;
}
/*
* Parse a quorum slot specification string "NR,ADDR,PORT" into its
* component parts. We use sscanf to both parse the leading NR and
* trailing PORT integers, and to pull out the inner ADDR string which
* is then parsed to make sure that it's a valid unicast ipv4 address.
* We require that all components be specified, and sscanf will check
* this by the number of matches it returns.
*/
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
{
#define ADDR_CHARS 45 /* max ipv6 */
char addr[ADDR_CHARS + 1] = {'\0',};
struct in_addr in;
int port;
int parsed;
int nr;
int ret;
/* leading and trailing ints and an inner sized string (no commas), all separated by commas */
ret = sscanf(arg, "%u,%"__stringify(ADDR_CHARS)"[^,],%u%n",
&nr, addr, &port, &parsed);
if (ret == EOF) {
printf("error parsing quorum slot '%s': %s\n",
arg, strerror(errno));
return -EINVAL;
}
if (parsed != strlen(arg)) {
printf("extra unparsed trailing characters in quorum slot '%s'\n",
arg);
return -EINVAL;
}
if (ret != 3) {
printf("failed to parse all three NR,ADDR,PORT tokens in quorum slot '%s'\n", arg);
return -EINVAL;
}
if (nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
printf("invalid nr '%d' in quorum slot '%s', must be between 0 and %u\n",
nr, arg, SCOUTFS_QUORUM_MAX_SLOTS - 1);
return -EINVAL;
}
if (port <= 0 || port > USHRT_MAX) {
printf("invalid ipv4 port '%u' in quorum slot '%s', must be between 1 and %u\n",
port, arg, USHRT_MAX);
return -EINVAL;
}
if (inet_aton(addr, &in) == 0 || htonl(in.s_addr) == 0 ||
htonl(in.s_addr) == UINT_MAX) {
printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
addr, arg);
return -EINVAL;
}
slot->addr.v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
slot->addr.v4.addr = cpu_to_le32(htonl(in.s_addr));
slot->addr.v4.port = cpu_to_le16(port);
return nr;
}

View File

@@ -4,14 +4,11 @@
#include <sys/time.h>
#include <argp.h>
struct scoutfs_quorum_slot;
int parse_human(char* str, u64 *val_ret);
int parse_u64(char *str, u64 *val_ret);
int parse_s64(char *str, s64 *val_ret);
int parse_u32(char *str, u32 *val_ret);
int parse_timespec(char *str, struct timespec *ts);
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg);
static inline char* strdup_or_error(const struct argp_state *state, char *str)
{

View File

@@ -339,6 +339,14 @@ static int print_srch_root_item(struct scoutfs_key *key, void *val,
return 0;
}
static int print_lock_clients_entry(struct scoutfs_key *key, void *val,
unsigned val_len, void *arg)
{
printf(" rid %016llx\n", le64_to_cpu(key->sklc_rid));
return 0;
}
static int print_trans_seqs_entry(struct scoutfs_key *key, void *val,
unsigned val_len, void *arg)
{
@@ -380,10 +388,10 @@ static int print_alloc_item(struct scoutfs_key *key, void *val,
typedef int (*print_item_func)(struct scoutfs_key *key, void *val,
unsigned val_len, void *arg);
static int print_block_ref(struct scoutfs_key *key, void *val,
static int print_btree_ref(struct scoutfs_key *key, void *val,
unsigned val_len, print_item_func func, void *arg)
{
struct scoutfs_block_ref *ref = val;
struct scoutfs_btree_ref *ref = val;
func(key, NULL, 0, arg);
printf(" ref blkno %llu seq %llu\n",
@@ -425,7 +433,7 @@ static void print_leaf_item_hash(struct scoutfs_btree_block *bt)
}
static int print_btree_block(int fd, struct scoutfs_super_block *super,
char *which, struct scoutfs_block_ref *ref,
char *which, struct scoutfs_btree_ref *ref,
print_item_func func, void *arg, u8 level)
{
struct scoutfs_btree_item *item;
@@ -492,7 +500,7 @@ static int print_btree_block(int fd, struct scoutfs_super_block *super,
val_len);
if (level)
print_block_ref(key, val, val_len, func, arg);
print_btree_ref(key, val, val_len, func, arg);
else
func(key, val, val_len, arg);
}
@@ -523,10 +531,11 @@ static int print_btree(int fd, struct scoutfs_super_block *super, char *which,
return ret;
}
static int print_alloc_list_block(int fd, char *str, struct scoutfs_block_ref *ref)
static int print_alloc_list_block(int fd, char *str,
struct scoutfs_alloc_list_ref *ref)
{
struct scoutfs_alloc_list_block *lblk;
struct scoutfs_block_ref next;
struct scoutfs_alloc_list_ref next;
u64 blkno;
u64 start;
u64 len;
@@ -574,7 +583,7 @@ static int print_alloc_list_block(int fd, char *str, struct scoutfs_block_ref *r
return print_alloc_list_block(fd, str, &next);
}
static int print_srch_block(int fd, struct scoutfs_block_ref *ref, int level)
static int print_srch_block(int fd, struct scoutfs_srch_ref *ref, int level)
{
struct scoutfs_srch_parent *srp;
struct scoutfs_srch_block *srb;
@@ -720,7 +729,7 @@ static int print_srch_root_files(struct scoutfs_key *key, void *val,
}
static int print_btree_leaf_items(int fd, struct scoutfs_super_block *super,
struct scoutfs_block_ref *ref,
struct scoutfs_btree_ref *ref,
print_item_func func, void *arg)
{
struct scoutfs_btree_item *item;
@@ -761,7 +770,7 @@ static int print_btree_leaf_items(int fd, struct scoutfs_super_block *super,
return 0;
}
static char *alloc_addr_str(union scoutfs_inet_addr *ia)
static char *alloc_addr_str(struct scoutfs_inet_addr *ia)
{
struct in_addr addr;
char *quad;
@@ -769,12 +778,12 @@ static char *alloc_addr_str(union scoutfs_inet_addr *ia)
int len;
memset(&addr, 0, sizeof(addr));
addr.s_addr = htonl(le32_to_cpu(ia->v4.addr));
addr.s_addr = htonl(le32_to_cpu(ia->addr));
quad = inet_ntoa(addr);
if (quad == NULL)
return NULL;
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->v4.port));
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->port));
if (len < 1 || len > 22)
return NULL;
@@ -783,29 +792,18 @@ static char *alloc_addr_str(union scoutfs_inet_addr *ia)
if (!str)
return NULL;
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->v4.port));
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->port));
return str;
}
#define OFF_NAME(x) \
{ offsetof(struct scoutfs_quorum_block, x), __stringify_1(x) }
static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
{
struct print_events {
size_t offset;
char *name;
} events[] = {
OFF_NAME(write), OFF_NAME(update_term), OFF_NAME(set_leader),
OFF_NAME(clear_leader), OFF_NAME(fenced),
};
struct scoutfs_quorum_block *blk = NULL;
struct scoutfs_quorum_block_event *ev;
char *log_addr = NULL;
u64 blkno;
int ret;
int i;
int e;
int j;
for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
blkno = SCOUTFS_QUORUM_BLKNO + i;
@@ -814,21 +812,31 @@ static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
if (ret)
goto out;
printf("quorum blkno %llu (slot %llu)\n",
blkno, blkno - SCOUTFS_QUORUM_BLKNO);
print_block_header(&blk->hdr, SCOUTFS_BLOCK_SM_SIZE);
printf(" term %llu random_write_mark 0x%llx flags 0x%llx\n",
le64_to_cpu(blk->term),
le64_to_cpu(blk->random_write_mark),
le64_to_cpu(blk->flags));
for (e = 0; e < array_size(events); e++) {
ev = (void *)blk + events[e].offset;
printf(" %12s: rid %016llx ts %llu.%08u\n",
events[e].name, le64_to_cpu(ev->rid),
le64_to_cpu(ev->ts.sec),
le32_to_cpu(ev->ts.nsec));
if (blk->voter_rid != 0) {
printf("quorum block blkno %llu\n"
" fsid %llx blkno %llu crc 0x%08x\n"
" term %llu write_nr %llu voter_rid %016llx "
"vote_for_rid %016llx\n"
" log_nr %u\n",
blkno, le64_to_cpu(blk->fsid),
le64_to_cpu(blk->blkno), le32_to_cpu(blk->crc),
le64_to_cpu(blk->term),
le64_to_cpu(blk->write_nr),
le64_to_cpu(blk->voter_rid),
le64_to_cpu(blk->vote_for_rid),
blk->log_nr);
for (j = 0; j < blk->log_nr; j++) {
free(log_addr);
log_addr = alloc_addr_str(&blk->log[j].addr);
if (!log_addr) {
ret = -ENOMEM;
goto out;
}
printf(" [%u]: term %llu rid %llu addr %s\n",
j, le64_to_cpu(blk->log[j].term),
le64_to_cpu(blk->log[j].rid),
log_addr);
}
}
}
@@ -842,8 +850,7 @@ out:
static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
{
char uuid_str[37];
char *addr;
int i;
char *server_addr;
uuid_unparse(super->uuid, uuid_str);
@@ -853,14 +860,20 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
printf("super blkno %llu\n", blkno);
print_block_header(&super->hdr, SCOUTFS_BLOCK_SM_SIZE);
printf(" version %llx uuid %s\n",
le64_to_cpu(super->version), uuid_str);
printf(" format_hash %llx uuid %s\n",
le64_to_cpu(super->format_hash), uuid_str);
printf(" flags: 0x%016llx\n", le64_to_cpu(super->flags));
server_addr = alloc_addr_str(&super->server_addr);
if (!server_addr)
return;
/* XXX these are all in a crazy order */
printf(" next_ino %llu next_trans_seq %llu\n"
" total_meta_blocks %llu first_meta_blkno %llu last_meta_blkno %llu\n"
" total_data_blocks %llu first_data_blkno %llu last_data_blkno %llu\n"
" quorum_fenced_term %llu quorum_server_term %llu unmount_barrier %llu\n"
" quorum_count %u server_addr %s\n"
" meta_alloc[0]: "ALCROOT_F"\n"
" meta_alloc[1]: "ALCROOT_F"\n"
" data_alloc: "ALCROOT_F"\n"
@@ -868,6 +881,7 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
" server_meta_avail[1]: "AL_HEAD_F"\n"
" server_meta_freed[0]: "AL_HEAD_F"\n"
" server_meta_freed[1]: "AL_HEAD_F"\n"
" lock_clients root: height %u blkno %llu seq %llu\n"
" mounted_clients root: height %u blkno %llu seq %llu\n"
" srch_root root: height %u blkno %llu seq %llu\n"
" trans_seqs root: height %u blkno %llu seq %llu\n"
@@ -880,6 +894,11 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
le64_to_cpu(super->total_data_blocks),
le64_to_cpu(super->first_data_blkno),
le64_to_cpu(super->last_data_blkno),
le64_to_cpu(super->quorum_fenced_term),
le64_to_cpu(super->quorum_server_term),
le64_to_cpu(super->unmount_barrier),
super->quorum_count,
server_addr,
ALCROOT_A(&super->meta_alloc[0]),
ALCROOT_A(&super->meta_alloc[1]),
ALCROOT_A(&super->data_alloc),
@@ -887,6 +906,9 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
AL_HEAD_A(&super->server_meta_avail[1]),
AL_HEAD_A(&super->server_meta_freed[0]),
AL_HEAD_A(&super->server_meta_freed[1]),
super->lock_clients.height,
le64_to_cpu(super->lock_clients.ref.blkno),
le64_to_cpu(super->lock_clients.ref.seq),
super->mounted_clients.height,
le64_to_cpu(super->mounted_clients.ref.blkno),
le64_to_cpu(super->mounted_clients.ref.seq),
@@ -900,18 +922,7 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
le64_to_cpu(super->fs_root.ref.blkno),
le64_to_cpu(super->fs_root.ref.seq));
printf(" quorum config version %llu\n",
le64_to_cpu(super->qconf.version));
for (i = 0; i < array_size(super->qconf.slots); i++) {
if (super->qconf.slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
continue;
addr = alloc_addr_str(&super->qconf.slots[i].addr);
if (addr) {
printf(" quorum slot %2u: %s\n", i, addr);
free(addr);
}
}
free(server_addr);
}
struct print_args {
@@ -935,6 +946,11 @@ static int print_volume(int fd)
ret = print_quorum_blocks(fd, super);
err = print_btree(fd, super, "lock_clients", &super->lock_clients,
print_lock_clients_entry, NULL);
if (err && !ret)
ret = err;
err = print_btree(fd, super, "mounted_clients", &super->mounted_clients,
print_mounted_client_entry, NULL);
if (err && !ret)

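The updated printer exposes the new fields (format_hash, quorum_count,
server_addr, and the rewritten quorum block log). A hypothetical
invocation, assuming this code backs the utility's print command and
is pointed at the metadata device:

```shell
scoutfs print /dev/meta_dev
```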
View File

@@ -8,6 +8,7 @@
#include <errno.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <argp.h>
#include "sparse.h"
@@ -207,6 +208,9 @@ static int do_release(struct release_args *args)
return ret;
}
assert(args->offset % SCOUTFS_BLOCK_SM_SIZE == 0);
assert(args->length % SCOUTFS_BLOCK_SM_SIZE == 0);
ioctl_args.offset = args->offset;
ioctl_args.length = args->length;
ioctl_args.data_version = args->data_version;

View File

@@ -114,7 +114,4 @@ static inline int memcmp_lens(const void *a, int a_len,
int get_path(char *path, int flags);
int read_block(int fd, u64 blkno, int shift, void **ret_val);
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
#endif