Mirror of https://github.com/versity/scoutfs.git (synced 2026-01-06 20:16:25 +00:00)

Compare commits: zab/move_b...zab/fix-bl (68 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 5661a1fb02 |  |
|  | 12fa289399 |  |
|  | 75e8fab57c |  |
|  | 513d6b2734 |  |
|  | f8d39610a2 |  |
|  | c470c1c9f6 |  |
|  | cad902b9cd |  |
|  | e163f3b099 |  |
|  | a508baae76 |  |
|  | 208c51d1d2 |  |
|  | 9450959ca4 |  |
|  | 6237f0adc5 |  |
|  | f18fa0e97a |  |
|  | 0969a94bfc |  |
|  | b1b75cbe9f |  |
|  | 0f14826ff8 |  |
|  | 336d521e44 |  |
|  | 4fab75b862 |  |
|  | f6f72e7eae |  |
|  | 9878312b4d |  |
|  | 7421bd1861 |  |
|  | 1db6f8194d |  |
|  | 2de7692336 |  |
|  | 8c1d96898a |  |
|  | 090646aaeb |  |
|  | d53350f9f1 |  |
|  | 57f34e90e9 |  |
|  | 79f6878355 |  |
|  | 740e13e53a |  |
|  | dbb716f1bb |  |
|  | 87fcad5428 |  |
|  | 406d157891 |  |
|  | 8e34c5d66a |  |
|  | 1c7bbd6260 |  |
|  | 3ad18b0f3b |  |
|  | 79cd7a499b |  |
|  | 6ad18769cb |  |
|  | 49d82fcaaf |  |
|  | e4e12c1968 |  |
|  | 15fd2ccc02 |  |
|  | eea95357d3 |  |
|  | 9842c5d13e |  |
|  | ade539217e |  |
|  | 5a90234c94 |  |
|  | f81e4cb98a |  |
|  | 1fc706bf3f |  |
|  | e9c3aa6501 |  |
|  | d39268bbc1 |  |
|  | 35ed1a2438 |  |
|  | 32e7978a6e |  |
|  | 8123b8fc35 |  |
|  | da5911c311 |  |
|  | 098fc420be |  |
|  | 7a96537210 |  |
|  | 0607dfdac8 |  |
|  | 0354bb64c5 |  |
|  | 631801c45c |  |
|  | 47a1ac92f7 |  |
|  | 004f693af3 |  |
|  | f271a5d140 |  |
|  | 355eac79d2 |  |
|  | d8b4e94854 |  |
|  | bed33c7ffd |  |
|  | b370730029 |  |
|  | d64dd89ead |  |
|  | 8d81196e01 |  |
|  | d731c1577e |  |
|  | a421bb0884 |  |
README.md (45 changed lines)
@@ -31,15 +31,9 @@ functionality hasn't been implemented. It's appropriate for early
 adopters and interested developers, not for production use.
 
 In that vein, expect significant incompatible changes to both the format
-of network messages and persistent structures. To avoid mistakes the
-implementation currently calculates a hash of the format and ioctl
-header files in the source tree. The kernel module will refuse to mount
-a volume created by userspace utilities with a mismatched hash, and it
-will refuse to connect to a remote node with a mismatched hash. This
-means having to unmount, mkfs, and remount everything across many
-functional changes. Once the format is nailed down we'll wire up
-forward and back compat machinery and remove this temporary safety
-measure.
+of network messages and persistent structures. Since the format hash-checking
+has now been removed in preparation for release, if there is any doubt, mkfs
+is strongly recommended.
 
 The current kernel module is developed against the RHEL/CentOS 7.x
 kernel to minimize the friction of developing and testing with partners'
@@ -71,8 +65,13 @@ The steps for getting scoutfs mounted and operational are:
 2. Make a new filesystem on the devices with the userspace utilities
 3. Mount the devices on all the nodes
 
-In this example we run all of these commands on three nodes. The names
-of the block devices are the same on all the nodes.
+In this example we use three nodes. The names of the block devices are
+the same on all the nodes. Two of the nodes will be quorum members. A
+majority of quorum members must be mounted to elect a leader to run a
+server that all the mounts connect to. It should be noted that two
+quorum members results in a majority of one, each member itself, so
+split brain elections are possible but so unlikely that it's fine for a
+demonstration.
 
 1. Get the Kernel Module and Userspace Binaries
 
@@ -94,24 +93,30 @@ of the block devices are the same on all the nodes.
     alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
     ```
 
-2. Make a New Filesystem (**destroys contents, no questions asked**)
+2. Make a New Filesystem (**destroys contents**)
 
-   We specify that two of our three nodes must be present to form a
-   quorum for the system to function.
+   We specify quorum slots with the addresses of each of the quorum
+   member nodes, the metadata device, and the data device.
 
    ```shell
-   scoutfs mkfs -Q 2 /dev/meta_dev /dev/data_dev
+   scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
   ```
 
 3. Mount the Filesystem
 
-   Each mounting node provides its local IP address on which it will run
-   an internal server for the other mounts if it is elected the leader by
-   the quorum.
+   First, mount each of the quorum nodes so that they can elect and
+   start a server for the remaining node to connect to. The slot numbers
+   were specified with the leading "0,..." and "1,..." in the mkfs options
+   above.
 
    ```shell
    mkdir /mnt/scoutfs
-   mount -t scoutfs -o server_addr=$NODE_ADDR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
+   mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
   ```
 
+   Then mount the remaining node which can now connect to the running server.
+
+   ```shell
+   mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
+   ```
+
 4. For Kicks, Observe the Metadata Change Index
 
@@ -16,11 +16,7 @@ SCOUTFS_GIT_DESCRIBE := \
 	$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
 		echo no-git)
 
-SCOUTFS_FORMAT_HASH := \
-	$(shell cat src/format.h src/ioctl.h | md5sum | cut -b1-16)
-
 SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
-	SCOUTFS_FORMAT_HASH=$(SCOUTFS_FORMAT_HASH) \
 	CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
 	EXTRA_CFLAGS="-Werror"
 
@@ -1,7 +1,6 @@
 obj-$(CONFIG_SCOUTFS_FS) := scoutfs.o
 
-CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\" \
-		 -DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU
+CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\"
 
 CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
 
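The define removed above was the temporary safety measure the README change describes: an md5 of the format headers baked into the module and compared against the volume at mount and connect time. A minimal sketch of how such a guard works; the function name is illustrative rather than scoutfs's actual code, though `super->format_hash` is the real field the old client greeting compared:

```c
/* hypothetical mount-time guard; SCOUTFS_FORMAT_HASH arrived via the -D flag above */
static int check_format_hash(struct scoutfs_super_block *super)
{
    /* refuse volumes written by utilities built from different format headers */
    if (le64_to_cpu(super->format_hash) != SCOUTFS_FORMAT_HASH)
        return -EINVAL;
    return 0;
}
```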
kmod/src/alloc.c (158 changed lines)
@@ -252,7 +252,7 @@ void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
 {
     memset(alloc, 0, sizeof(struct scoutfs_alloc));
 
-    spin_lock_init(&alloc->lock);
+    seqlock_init(&alloc->seqlock);
     mutex_init(&alloc->mutex);
     alloc->avail = *avail;
     alloc->freed = *freed;
@@ -358,31 +358,24 @@ static void list_block_sort(struct scoutfs_alloc_list_block *lblk)
 
 /*
  * We're always reading blocks that we own, so we shouldn't see stale
- * references. But the cached block can be stale and we can need to
- * invalidate it.
+ * references but we could retry reads after dropping stale cached
+ * blocks. If we do see a stale error then we've hit persistent
+ * corruption.
  */
-static int read_list_block(struct super_block *sb,
-                           struct scoutfs_alloc_list_ref *ref,
+static int read_list_block(struct super_block *sb, struct scoutfs_block_ref *ref,
                            struct scoutfs_block **bl_ret)
 {
-    struct scoutfs_block *bl = NULL;
     int ret;
 
-    bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
-    if (!IS_ERR_OR_NULL(bl) &&
-        !scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
-                                      SCOUTFS_BLOCK_MAGIC_ALLOC_LIST)) {
-        scoutfs_inc_counter(sb, alloc_stale_cached_list_block);
-        scoutfs_block_invalidate(sb, bl);
-        scoutfs_block_put(sb, bl);
-        bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
-    }
-    if (IS_ERR(bl)) {
-        *bl_ret = NULL;
-        return PTR_ERR(bl);
-    }
+    ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, bl_ret);
+    if (ret < 0) {
+        if (ret == -ESTALE) {
+            scoutfs_inc_counter(sb, alloc_stale_list_block);
+            ret = -EIO;
+        }
+    };
 
-    *bl_ret = bl;
-    return 0;
+    return ret;
 }
 
 /*
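The pattern the old read_list_block open-coded, read the block, check the ref for consistency, invalidate the cache and re-read once, then give up with -ESTALE, is what the new scoutfs_block_read_ref helper centralizes. A rough sketch of that consolidated helper, built only from the calls visible in the old code; the real implementation lives in the block.c diff (suppressed later on this page as too large) and may differ:

```c
/* hedged sketch of a read-by-ref helper; the real one in block.c may differ */
static int sketch_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref,
                                 u32 magic, struct scoutfs_block **bl_ret)
{
    struct scoutfs_block *bl = NULL;
    int retries = 1;

    do {
        bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
        if (IS_ERR(bl))
            return PTR_ERR(bl);
        if (scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno, magic))
            break;
        /* drop the stale cached copy and allow one re-read from disk */
        scoutfs_block_invalidate(sb, bl);
        scoutfs_block_put(sb, bl);
        bl = NULL;
    } while (retries-- > 0);

    if (!bl)
        return -ESTALE;
    *bl_ret = bl;
    return 0;
}
```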
@@ -396,86 +389,12 @@ static int read_list_block(struct super_block *sb,
 static int dirty_list_block(struct super_block *sb,
                             struct scoutfs_alloc *alloc,
                             struct scoutfs_block_writer *wri,
-                            struct scoutfs_alloc_list_ref *ref,
+                            struct scoutfs_block_ref *ref,
                             u64 dirty, u64 *old,
                             struct scoutfs_block **bl_ret)
 {
-    struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
-    struct scoutfs_block *cow_bl = NULL;
-    struct scoutfs_block *bl = NULL;
-    struct scoutfs_alloc_list_block *lblk;
-    bool undo_alloc = false;
-    u64 blkno;
-    int ret;
-    int err;
-
-    blkno = le64_to_cpu(ref->blkno);
-    if (blkno) {
-        ret = read_list_block(sb, ref, &bl);
-        if (ret < 0)
-            goto out;
-
-        if (scoutfs_block_writer_is_dirty(sb, bl)) {
-            ret = 0;
-            goto out;
-        }
-    }
-
-    if (dirty == 0) {
-        ret = scoutfs_alloc_meta(sb, alloc, wri, &dirty);
-        if (ret < 0)
-            goto out;
-        undo_alloc = true;
-    }
-
-    cow_bl = scoutfs_block_create(sb, dirty);
-    if (IS_ERR(cow_bl)) {
-        ret = PTR_ERR(cow_bl);
-        goto out;
-    }
-
-    if (old) {
-        *old = blkno;
-    } else if (blkno) {
-        ret = scoutfs_free_meta(sb, alloc, wri, blkno);
-        if (ret < 0)
-            goto out;
-    }
-
-    if (bl)
-        memcpy(cow_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
-    else
-        memset(cow_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
-    scoutfs_block_put(sb, bl);
-    bl = cow_bl;
-    cow_bl = NULL;
-
-    lblk = bl->data;
-    lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
-    lblk->hdr.fsid = super->hdr.fsid;
-    lblk->hdr.blkno = cpu_to_le64(bl->blkno);
-    prandom_bytes(&lblk->hdr.seq, sizeof(lblk->hdr.seq));
-
-    ref->blkno = lblk->hdr.blkno;
-    ref->seq = lblk->hdr.seq;
-
-    scoutfs_block_writer_mark_dirty(sb, wri, bl);
-    ret = 0;
-
-out:
-    scoutfs_block_put(sb, cow_bl);
-    if (ret < 0 && undo_alloc) {
-        err = scoutfs_free_meta(sb, alloc, wri, dirty);
-        BUG_ON(err); /* inconsistent */
-    }
-
-    if (ret < 0) {
-        scoutfs_block_put(sb, bl);
-        bl = NULL;
-    }
-    *bl_ret = bl;
-
-    return ret;
+    return scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST,
+                                   bl_ret, dirty, old);
 }
 
 /* Allocate a new dirty list block if we fill up more than 3/4 of the block. */
@@ -497,7 +416,7 @@ static int dirty_alloc_blocks(struct super_block *sb,
                               struct scoutfs_alloc *alloc,
                               struct scoutfs_block_writer *wri)
 {
-    struct scoutfs_alloc_list_ref orig_freed;
+    struct scoutfs_block_ref orig_freed;
     struct scoutfs_alloc_list_block *lblk;
     struct scoutfs_block *av_bl = NULL;
     struct scoutfs_block *fr_bl = NULL;
@@ -607,7 +526,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
     if (ret < 0)
         goto out;
 
-    spin_lock(&alloc->lock);
+    write_seqlock(&alloc->seqlock);
 
     lblk = alloc->dirty_avail_bl->data;
     if (WARN_ON_ONCE(lblk->nr == 0)) {
         /* shouldn't happen, transaction should commit first */
@@ -617,7 +537,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
         list_block_remove(&alloc->avail, lblk, 1);
         ret = 0;
     }
-    spin_unlock(&alloc->lock);
+
+    write_sequnlock(&alloc->seqlock);
 
 out:
     if (ret < 0)
@@ -640,7 +561,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
     if (ret < 0)
         goto out;
 
-    spin_lock(&alloc->lock);
+    write_seqlock(&alloc->seqlock);
 
     lblk = alloc->dirty_freed_bl->data;
     if (WARN_ON_ONCE(list_block_space(lblk->nr) == 0)) {
         /* shouldn't happen, transaction should commit first */
@@ -649,7 +571,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
         list_block_add(&alloc->freed, lblk, blkno);
         ret = 0;
     }
-    spin_unlock(&alloc->lock);
+
+    write_sequnlock(&alloc->seqlock);
 
 out:
     scoutfs_inc_counter(sb, alloc_free_meta);
@@ -770,8 +693,13 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
     ret = 0;
 out:
     if (ret < 0) {
+        /*
+         * Special retval meaning there wasn't space to alloc from
+         * this txn. Doesn't mean filesystem is completely full.
+         * Maybe upper layers want to try again.
+         */
         if (ret == -ENOENT)
-            ret = -ENOSPC;
+            ret = -ENOBUFS;
         *blkno_ret = 0;
         *count_ret = 0;
     } else {
@@ -1101,7 +1029,7 @@ int scoutfs_alloc_splice_list(struct super_block *sb,
                               struct scoutfs_alloc_list_head *src)
 {
     struct scoutfs_alloc_list_block *lblk;
-    struct scoutfs_alloc_list_ref *ref;
+    struct scoutfs_block_ref *ref;
     struct scoutfs_block *prev = NULL;
     struct scoutfs_block *bl = NULL;
     int ret = 0;
@@ -1142,17 +1070,23 @@ out:
 
 /*
  * Returns true if meta avail and free don't have room for the given
- * number of alloctions or frees.
+ * number of allocations or frees. This is called at a significantly
+ * higher frequency than allocations as writers try to enter
+ * transactions. This is the only reader of the seqlock which gives
+ * read-mostly sampling instead of bouncing a spinlock around all the
+ * cores.
  */
 bool scoutfs_alloc_meta_low(struct super_block *sb,
                             struct scoutfs_alloc *alloc, u32 nr)
 {
+    unsigned int seq;
     bool lo;
 
-    spin_lock(&alloc->lock);
-    lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
-         list_block_space(alloc->freed.first_nr) < nr;
-    spin_unlock(&alloc->lock);
+    do {
+        seq = read_seqbegin(&alloc->seqlock);
+        lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
+             list_block_space(alloc->freed.first_nr) < nr;
+    } while (read_seqretry(&alloc->seqlock, seq));
 
     return lo;
 }
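The comment above is the heart of the change: transaction entry polls these counts far more often than allocations modify them, so a read-mostly primitive fits. A self-contained sketch of the same seqlock pattern, with generic names standing in for the scoutfs fields and only `<linux/seqlock.h>` assumed:

```c
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(counts_seqlock);
static u32 avail_nr;    /* stands in for alloc->avail.first_nr */

/* hot path: lock-free read that simply retries if a writer raced with it */
static bool counts_low(u32 nr)
{
    unsigned int seq;
    bool lo;

    do {
        seq = read_seqbegin(&counts_seqlock);
        lo = avail_nr < nr;
    } while (read_seqretry(&counts_seqlock, seq));

    return lo;
}

/* rare path: writers still serialize with each other through the lock */
static void counts_take(u32 nr)
{
    write_seqlock(&counts_seqlock);
    avail_nr -= nr;
    write_sequnlock(&counts_seqlock);
}
```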
@@ -1164,8 +1098,8 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
 int scoutfs_alloc_foreach(struct super_block *sb,
                           scoutfs_alloc_foreach_cb_t cb, void *arg)
 {
-    struct scoutfs_btree_ref stale_refs[2] = {{0,}};
-    struct scoutfs_btree_ref refs[2] = {{0,}};
+    struct scoutfs_block_ref stale_refs[2] = {{0,}};
+    struct scoutfs_block_ref refs[2] = {{0,}};
     struct scoutfs_super_block *super = NULL;
     struct scoutfs_srch_compact *sc;
     struct scoutfs_log_trees lt;
@@ -72,7 +72,8 @@
  * transaction.
  */
 struct scoutfs_alloc {
-    spinlock_t lock;
+    /* writers rarely modify list_head avail/freed. readers often check for _meta_alloc_low */
+    seqlock_t seqlock;
     struct mutex mutex;
     struct scoutfs_block *dirty_avail_bl;
     struct scoutfs_block *dirty_freed_bl;
kmod/src/block.c (756 changed lines; file diff suppressed because it is too large)
@@ -13,27 +13,16 @@ struct scoutfs_block {
     void *priv;
 };
 
-__le32 scoutfs_block_calc_crc(struct scoutfs_block_header *hdr, u32 size);
-bool scoutfs_block_valid_crc(struct scoutfs_block_header *hdr, u32 size);
-bool scoutfs_block_valid_ref(struct super_block *sb,
-                             struct scoutfs_block_header *hdr,
-                             __le64 seq, __le64 blkno);
-
 struct scoutfs_block *scoutfs_block_create(struct super_block *sb, u64 blkno);
-struct scoutfs_block *scoutfs_block_read(struct super_block *sb, u64 blkno);
-void scoutfs_block_invalidate(struct super_block *sb, struct scoutfs_block *bl);
-bool scoutfs_block_consistent_ref(struct super_block *sb,
-                                  struct scoutfs_block *bl,
-                                  __le64 seq, __le64 blkno, u32 magic);
+int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
+                           struct scoutfs_block **bl_ret);
 void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
 
 void scoutfs_block_writer_init(struct super_block *sb,
                                struct scoutfs_block_writer *wri);
 void scoutfs_block_writer_mark_dirty(struct super_block *sb,
                                      struct scoutfs_block_writer *wri,
                                      struct scoutfs_block *bl);
 bool scoutfs_block_writer_is_dirty(struct super_block *sb,
                                    struct scoutfs_block *bl);
+int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
+                            struct scoutfs_block_writer *wri, struct scoutfs_block_ref *ref,
+                            u32 magic, struct scoutfs_block **bl_ret,
+                            u64 dirty_blkno, u64 *ref_blkno);
 int scoutfs_block_writer_write(struct super_block *sb,
                                struct scoutfs_block_writer *wri);
 void scoutfs_block_writer_forget_all(struct super_block *sb,
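Taken together, the new pair gives callers a two-call idiom: read a block through its ref, or cow it into the current transaction. A hedged example of how a caller might use them, based only on the signatures declared above; SOME_MAGIC and the surrounding variables are placeholders:

```c
/* illustrative caller; SOME_MAGIC, sb, alloc, wri and ref are placeholders */
struct scoutfs_block *bl;
int ret;

/* read-only: follow the ref, verifying seq/blkno/magic against the cache */
ret = scoutfs_block_read_ref(sb, ref, SOME_MAGIC, &bl);
if (ret == 0) {
    /* ... read bl->data ... */
    scoutfs_block_put(sb, bl);
}

/*
 * writable: cow the ref'd block into this transaction's dirty set;
 * 0/NULL for the last two args mirrors how dirty_list_block forwards
 * "allocate a block for me and free the old one".
 */
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, SOME_MAGIC, &bl, 0, NULL);
```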
kmod/src/btree.c (154 changed lines)
@@ -80,7 +80,7 @@ enum btree_walk_flags {
     BTW_NEXT   = (1 << 0), /* return >= key */
     BTW_PREV   = (1 << 1), /* return <= key */
     BTW_DIRTY  = (1 << 2), /* cow stable blocks */
-    BTW_ALLOC  = (1 << 3), /* allocate a new block for 0 ref */
+    BTW_ALLOC  = (1 << 3), /* allocate a new block for 0 ref, requires dirty */
     BTW_INSERT = (1 << 4), /* walking to insert, try splitting */
     BTW_DELETE = (1 << 5), /* walking to delete, try joining */
 };
@@ -619,140 +619,36 @@ static void move_items(struct scoutfs_btree_block *dst,
  * This is used to lookup cached blocks, read blocks, cow blocks for
  * dirtying, and allocate new blocks.
  *
- * Btree blocks don't have rigid cache consistency. We can be following
- * block references into cached blocks that are now stale or can be
- * following a stale root into blocks that have been overwritten. If we
- * hit a block that looks stale we first invalidate the cache and retry,
- * returning -ESTALE if it still looks wrong. The caller can retry the
- * read from a more current root or decide that this is a persistent
- * error.
+ * If we read a stale block we return stale so the caller can retry with
+ * a newer root or return an error.
  */
 static int get_ref_block(struct super_block *sb,
                          struct scoutfs_alloc *alloc,
                          struct scoutfs_block_writer *wri, int flags,
-                         struct scoutfs_btree_ref *ref,
+                         struct scoutfs_block_ref *ref,
                          struct scoutfs_block **bl_ret)
 {
-    struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
-    struct scoutfs_btree_block *bt = NULL;
-    struct scoutfs_btree_block *new;
-    struct scoutfs_block *new_bl = NULL;
-    struct scoutfs_block *bl = NULL;
-    bool retried = false;
-    u64 blkno;
-    u64 seq;
     int ret;
 
-    /* always get the current block, either to return or cow from */
-    if (ref && ref->blkno) {
-retry:
+    if (WARN_ON_ONCE((flags & BTW_ALLOC) && !(flags & BTW_DIRTY)))
+        return -EINVAL;
 
-        bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
-        if (IS_ERR(bl)) {
-            trace_scoutfs_btree_read_error(sb, ref);
-            scoutfs_inc_counter(sb, btree_read_error);
-            ret = PTR_ERR(bl);
-            goto out;
-        }
-        bt = (void *)bl->data;
-
-        if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
-                                          SCOUTFS_BLOCK_MAGIC_BTREE) ||
-            scoutfs_trigger(sb, BTREE_STALE_READ)) {
-
-            scoutfs_inc_counter(sb, btree_stale_read);
-
-            scoutfs_block_invalidate(sb, bl);
-            scoutfs_block_put(sb, bl);
-            bl = NULL;
-
-            if (!retried) {
-                retried = true;
-                goto retry;
-            }
-
-            ret = -ESTALE;
-            goto out;
-        }
-
-        /*
-         * We need to create a new dirty copy of the block if
-         * the caller asked for it. If the block is already
-         * dirty then we can return it.
-         */
-        if (!(flags & BTW_DIRTY) ||
-            scoutfs_block_writer_is_dirty(sb, bl)) {
-            ret = 0;
-            goto out;
-        }
-
-    } else if (!(flags & BTW_ALLOC)) {
+    if (ref->blkno == 0 && !(flags & BTW_ALLOC)) {
         ret = -ENOENT;
         goto out;
     }
 
-    ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
-    if (ret < 0)
-        goto out;
-
-    prandom_bytes(&seq, sizeof(seq));
-
-    new_bl = scoutfs_block_create(sb, blkno);
-    if (IS_ERR(new_bl)) {
-        ret = scoutfs_free_meta(sb, alloc, wri, blkno);
-        BUG_ON(ret);
-        ret = PTR_ERR(new_bl);
-        goto out;
-    }
-    new = (void *)new_bl->data;
-
-    /* free old stable blkno we're about to overwrite */
-    if (ref && ref->blkno) {
-        ret = scoutfs_free_meta(sb, alloc, wri,
-                                le64_to_cpu(ref->blkno));
-        if (ret) {
-            ret = scoutfs_free_meta(sb, alloc, wri, blkno);
-            BUG_ON(ret);
-            scoutfs_block_put(sb, new_bl);
-            new_bl = NULL;
-            goto out;
-        }
-    }
-
-    scoutfs_block_writer_mark_dirty(sb, wri, new_bl);
-
-    trace_scoutfs_btree_dirty_block(sb, blkno, seq,
-                                    bt ? le64_to_cpu(bt->hdr.blkno) : 0,
-                                    bt ? le64_to_cpu(bt->hdr.seq) : 0);
-
-    if (bt) {
-        /* returning a cow of an existing block */
-        memcpy(new, bt, SCOUTFS_BLOCK_LG_SIZE);
-        scoutfs_block_put(sb, bl);
-    } else {
-        /* returning a newly allocated block */
-        memset(new, 0, SCOUTFS_BLOCK_LG_SIZE);
-        new->hdr.fsid = super->hdr.fsid;
-    }
-    bl = new_bl;
-    bt = new;
-
-    bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
-    bt->hdr.blkno = cpu_to_le64(blkno);
-    bt->hdr.seq = cpu_to_le64(seq);
-    if (ref) {
-        ref->blkno = bt->hdr.blkno;
-        ref->seq = bt->hdr.seq;
-    }
-    ret = 0;
-
+    if (flags & BTW_DIRTY)
+        ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_BTREE,
+                                      bl_ret, 0, NULL);
+    else
+        ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BTREE, bl_ret);
 out:
-    if (ret) {
-        scoutfs_block_put(sb, bl);
-        bl = NULL;
+    if (ret < 0) {
+        if (ret == -ESTALE)
+            scoutfs_inc_counter(sb, btree_stale_read);
     }
 
-    *bl_ret = bl;
     return ret;
 }
 
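-ESTALE now propagates out of get_ref_block instead of being retried internally against the same ref. A sketch of the caller-side pattern this enables, retrying the whole walk from a fresher root; refresh_root here is a hypothetical helper used only for illustration:

```c
/* hedged sketch: a walk retries against a newer root when it sees -ESTALE */
for (;;) {
    ret = get_ref_block(sb, alloc, wri, flags, &root->ref, &bl);
    if (ret != -ESTALE)
        break;
    /* refresh_root() is illustrative; e.g. re-read the super or log roots */
    ret = refresh_root(sb, root);
    if (ret < 0)
        break;
}
```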
@@ -766,7 +662,7 @@ static void create_parent_item(struct scoutfs_btree_block *parent,
 {
     struct scoutfs_avl_node *par;
     int cmp;
-    struct scoutfs_btree_ref ref = {
+    struct scoutfs_block_ref ref = {
         .blkno = child->hdr.blkno,
         .seq = child->hdr.seq,
     };
@@ -784,7 +680,7 @@ static void update_parent_item(struct scoutfs_btree_block *parent,
                                struct scoutfs_btree_item *par_item,
                                struct scoutfs_btree_block *child)
 {
-    struct scoutfs_btree_ref *ref = item_val(parent, par_item);
+    struct scoutfs_block_ref *ref = item_val(parent, par_item);
 
     par_item->key = *item_key(last_item(child));
     ref->blkno = child->hdr.blkno;
|
     struct scoutfs_block *par_bl = NULL;
     struct scoutfs_btree_block *left;
     struct scoutfs_key max_key;
+    struct scoutfs_block_ref zeros;
     int ret;
     int err;
 
     /* parents need to leave room for child references */
     if (right->level)
-        val_len = sizeof(struct scoutfs_btree_ref);
+        val_len = sizeof(struct scoutfs_block_ref);
 
     /* don't need to split if there's enough space for the item */
     if (mid_free_item_room(right, val_len))
@@ -849,7 +746,8 @@ static int try_split(struct super_block *sb,
     scoutfs_inc_counter(sb, btree_split);
 
     /* alloc split neighbour first to avoid unwinding tree growth */
-    ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &left_bl);
+    memset(&zeros, 0, sizeof(zeros));
+    ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &left_bl);
     if (ret)
         return ret;
     left = left_bl->data;
@@ -857,7 +755,8 @@ static int try_split(struct super_block *sb,
     init_btree_block(left, right->level);
 
     if (!parent) {
-        ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &par_bl);
+        memset(&zeros, 0, sizeof(zeros));
+        ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &par_bl);
         if (ret) {
             err = scoutfs_free_meta(sb, alloc, wri,
                                     le64_to_cpu(left->hdr.blkno));
@@ -905,7 +804,7 @@ static int try_join(struct super_block *sb,
     struct scoutfs_btree_item *sib_par_item;
     struct scoutfs_btree_block *sib;
     struct scoutfs_block *sib_bl;
-    struct scoutfs_btree_ref *ref;
+    struct scoutfs_block_ref *ref;
     unsigned int sib_tot;
     bool move_right;
     int to_move;
@@ -1194,7 +1093,7 @@ static int btree_walk(struct super_block *sb,
     struct scoutfs_btree_item *prev;
     struct scoutfs_avl_node *next_node;
     struct scoutfs_avl_node *node;
-    struct scoutfs_btree_ref *ref;
+    struct scoutfs_block_ref *ref;
     unsigned int level;
     unsigned int nr;
     int ret;
@@ -1225,8 +1124,7 @@ restart:
         if (!(flags & BTW_INSERT)) {
             ret = -ENOENT;
         } else {
-            ret = get_ref_block(sb, alloc, wri, BTW_ALLOC,
-                                &root->ref, &bl);
+            ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &root->ref, &bl);
             if (ret == 0) {
                 bt = bl->data;
                 init_btree_block(bt, 0);
@@ -34,13 +34,10 @@
 
 /*
  * The client is responsible for maintaining a connection to the server.
- * This includes managing quorum elections that determine which client
- * should run the server that all the clients connect to.
  */
 
 #define CLIENT_CONNECT_DELAY_MS   (MSEC_PER_SEC / 10)
 #define CLIENT_CONNECT_TIMEOUT_MS (1 * MSEC_PER_SEC)
-#define CLIENT_QUORUM_TIMEOUT_MS  (5 * MSEC_PER_SEC)
 
 struct client_info {
     struct super_block *sb;
@@ -121,16 +118,14 @@ int scoutfs_client_get_roots(struct super_block *sb,
 int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
 {
     struct client_info *client = SCOUTFS_SB(sb)->client_info;
-    __le64 before = cpu_to_le64p(seq);
-    __le64 after;
+    __le64 leseq;
     int ret;
 
     ret = scoutfs_net_sync_request(sb, client->conn,
                                    SCOUTFS_NET_CMD_ADVANCE_SEQ,
-                                   &before, sizeof(before),
-                                   &after, sizeof(after));
+                                   NULL, 0, &leseq, sizeof(leseq));
     if (ret == 0)
-        *seq = le64_to_cpu(after);
+        *seq = le64_to_cpu(leseq);
 
     return ret;
 }
@@ -282,10 +277,10 @@ static int client_greeting(struct super_block *sb,
         goto out;
     }
 
-    if (gr->format_hash != super->format_hash) {
+    if (gr->version != super->version) {
         scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
-                     le64_to_cpu(gr->format_hash),
-                     le64_to_cpu(super->format_hash));
+                     le64_to_cpu(gr->version),
+                     le64_to_cpu(super->version));
         ret = -EINVAL;
         goto out;
     }
@@ -294,52 +289,30 @@ static int client_greeting(struct super_block *sb,
     scoutfs_net_client_greeting(sb, conn, new_server);
 
     client->server_term = le64_to_cpu(gr->server_term);
-    client->greeting_umb = le64_to_cpu(gr->unmount_barrier);
     ret = 0;
 out:
     return ret;
 }
 
 /*
- * This work is responsible for maintaining a connection from the client
- * to the server. It's queued on mount and disconnect and we requeue
- * the work if the work fails and we're not shutting down.
+ * The client is deciding if it needs to keep trying to reconnect to
+ * have its farewell request processed. The server removes our mounted
+ * client item last so that if we don't see it we know the server has
+ * processed our farewell and we don't need to reconnect, we can unmount
+ * safely.
  *
- * In the typical case a mount reads the super blocks and finds the
- * address of the currently running server and connects to it.
- * Non-voting clients who can't connect will keep trying alternating
- * reading the address and getting connect timeouts.
- *
- * Voting mounts will try to elect a leader if they can't connect to the
- * server. When a quorum can't connect and are able to elect a leader
- * then a new server is started. The new server will write its address
- * in the super and everyone will be able to connect.
- *
- * There's a tricky bit of coordination required to safely unmount.
- * Clients need to tell the server that they won't be coming back with a
- * farewell request. Once a client receives its farewell response it
- * can exit. But a majority of clients need to stick around to elect a
- * server to process all their farewell requests. This is coordinated
- * by having the greeting tell the server that a client is a voter. The
- * server then holds on to farewell requests from voters until only
- * requests from the final quorum remain. These farewell responses are
- * only sent after updating an unmount barrier in the super to indicate
- * to the final quorum that they can safely exit without having received
- * a farewell response over the network.
+ * This is peeking at btree blocks that the server could be actively
+ * freeing with cow updates so it can see stale blocks, we just return
+ * the error and we'll retry eventually as the connection times out.
  */
-static void scoutfs_client_connect_worker(struct work_struct *work)
+static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
 {
-    struct client_info *client = container_of(work, struct client_info,
-                                              connect_dwork.work);
-    struct super_block *sb = client->sb;
-    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
-    struct scoutfs_super_block *super = NULL;
-    struct mount_options *opts = &sbi->opts;
-    const bool am_voter = opts->server_addr.sin_addr.s_addr != 0;
-    struct scoutfs_net_greeting greet;
-    struct sockaddr_in sin;
-    ktime_t timeout_abs;
-    u64 elected_term;
+    struct scoutfs_key key = {
+        .sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
+        .skmc_rid = cpu_to_le64(rid),
+    };
+    struct scoutfs_super_block *super;
+    SCOUTFS_BTREE_ITEM_REF(iref);
     int ret;
 
     super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -352,57 +325,77 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
     if (ret)
         goto out;
 
-    /* can safely unmount if we see that server processed our farewell */
-    if (am_voter && client->sending_farewell &&
-        (le64_to_cpu(super->unmount_barrier) > client->greeting_umb)) {
+    ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key, &iref);
+    if (ret == 0) {
+        scoutfs_btree_put_iref(&iref);
+        ret = 1;
+    }
+    if (ret == -ENOENT)
+        ret = 0;
+
+    kfree(super);
+out:
+    return ret;
+}
+
+/*
+ * This work is responsible for maintaining a connection from the client
+ * to the server. It's queued on mount and disconnect and we requeue
+ * the work if the work fails and we're not shutting down.
+ *
+ * We ask quorum for an address to try and connect to. If there isn't
+ * one, or it fails, we back off a bit before trying again.
+ *
+ * There's a tricky bit of coordination required to safely unmount.
+ * Clients need to tell the server that they won't be coming back with a
+ * farewell request. Once the server processes a farewell request from
+ * the client it can forget the client. If the connection is broken
+ * before the client gets the farewell response it doesn't want to
+ * reconnect to send it again.. instead the client can read the metadata
+ * device to check for the lack of an item which indicates that the
+ * server has processed its farewell.
+ */
+static void scoutfs_client_connect_worker(struct work_struct *work)
+{
+    struct client_info *client = container_of(work, struct client_info,
+                                              connect_dwork.work);
+    struct super_block *sb = client->sb;
+    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+    struct scoutfs_super_block *super = &sbi->super;
+    struct mount_options *opts = &sbi->opts;
+    const bool am_quorum = opts->quorum_slot_nr >= 0;
+    struct scoutfs_net_greeting greet;
+    struct sockaddr_in sin;
+    int ret;
+
+    /* can unmount once server farewell handling removes our item */
+    if (client->sending_farewell &&
+        lookup_mounted_client_item(sb, sbi->rid) == 0) {
         client->farewell_error = 0;
         complete(&client->farewell_comp);
         ret = 0;
         goto out;
     }
 
-    /* try to connect to the super's server address */
-    scoutfs_addr_to_sin(&sin, &super->server_addr);
-    if (sin.sin_addr.s_addr != 0 && sin.sin_port != 0)
-        ret = scoutfs_net_connect(sb, client->conn, &sin,
-                                  CLIENT_CONNECT_TIMEOUT_MS);
-    else
-        ret = -ENOTCONN;
-
-    /* voters try to elect a leader if they couldn't connect */
-    if (ret < 0) {
-        /* non-voters will keep retrying */
-        if (!am_voter)
-            goto out;
-
-        /* make sure local server isn't writing super during votes */
-        scoutfs_server_stop(sb);
-
-        timeout_abs = ktime_add_ms(ktime_get(),
-                                   CLIENT_QUORUM_TIMEOUT_MS);
-
-        ret = scoutfs_quorum_election(sb, timeout_abs,
-                                      le64_to_cpu(super->quorum_server_term),
-                                      &elected_term);
-        /* start the server if we were asked to */
-        if (elected_term > 0)
-            ret = scoutfs_server_start(sb, &opts->server_addr,
-                                       elected_term);
-    }
+    ret = -ENOTCONN;
+    ret = scoutfs_quorum_server_sin(sb, &sin);
+    if (ret < 0)
+        goto out;
+
+    ret = scoutfs_net_connect(sb, client->conn, &sin,
+                              CLIENT_CONNECT_TIMEOUT_MS);
+    if (ret < 0)
+        goto out;
 
     /* send a greeting to verify endpoints of each connection */
     greet.fsid = super->hdr.fsid;
-    greet.format_hash = super->format_hash;
+    greet.version = super->version;
     greet.server_term = cpu_to_le64(client->server_term);
-    greet.unmount_barrier = cpu_to_le64(client->greeting_umb);
     greet.rid = cpu_to_le64(sbi->rid);
     greet.flags = 0;
     if (client->sending_farewell)
         greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_FAREWELL);
-    if (am_voter)
-        greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_VOTER);
+    if (am_quorum)
+        greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_QUORUM);
 
     ret = scoutfs_net_submit_request(sb, client->conn,
                                      SCOUTFS_NET_CMD_GREETING,
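The unmount handshake the comment describes reduces to one predicate on reconnect. A condensed restatement of the logic the worker above implements; this exact function does not exist in the tree, it only distills the calls shown:

```c
/* condensed from the worker above: decide between "done" and "reconnect" */
static bool farewell_processed(struct super_block *sb, struct client_info *client)
{
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

    /* the server deletes our mounted_client item only after our farewell */
    return client->sending_farewell &&
           lookup_mounted_client_item(sb, sbi->rid) == 0;
}
```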
@@ -411,7 +404,6 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
     if (ret)
         scoutfs_net_shutdown(sb, client->conn);
 out:
-    kfree(super);
 
     /* always have a small delay before retrying to avoid storms */
     if (ret && !atomic_read(&client->shutting_down))
kmod/src/count.h (315 changed lines)
@@ -1,315 +0,0 @@
-#ifndef _SCOUTFS_COUNT_H_
-#define _SCOUTFS_COUNT_H_
-
-/*
- * Our estimate of the space consumed while dirtying items is based on
- * the number of items and the size of their values.
- *
- * The estimate is still a read-only input to entering the transaction.
- * We'd like to use it as a clean rhs arg to hold_trans. We define SIC_
- * functions which return the count struct. This lets us have a single
- * arg and avoid bugs in initializing and passing in struct pointers
- * from callers. The internal __count functions are used compose an
- * estimate out of the sets of items it manipulates. We program in much
- * clearer C instead of in the preprocessor.
- *
- * Compilers are able to collapse the inlines into constants for the
- * constant estimates.
- */
-
-struct scoutfs_item_count {
-    signed items;
-    signed vals;
-};
-
-/* The caller knows exactly what they're doing. */
-static inline const struct scoutfs_item_count SIC_EXACT(signed items,
-                                                        signed vals)
-{
-    struct scoutfs_item_count cnt = {
-        .items = items,
-        .vals = vals,
-    };
-
-    return cnt;
-}
-
-/*
- * Allocating an inode creates a new set of indexed items.
- */
-static inline void __count_alloc_inode(struct scoutfs_item_count *cnt)
-{
-    const int nr_indices = SCOUTFS_INODE_INDEX_NR;
-
-    cnt->items += 1 + nr_indices;
-    cnt->vals += sizeof(struct scoutfs_inode);
-}
-
-/*
- * Dirtying an inode dirties the inode item and can delete and create
- * the full set of indexed items.
- */
-static inline void __count_dirty_inode(struct scoutfs_item_count *cnt)
-{
-    const int nr_indices = 2 * SCOUTFS_INODE_INDEX_NR;
-
-    cnt->items += 1 + nr_indices;
-    cnt->vals += sizeof(struct scoutfs_inode);
-}
-
-static inline const struct scoutfs_item_count SIC_ALLOC_INODE(void)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_alloc_inode(&cnt);
-
-    return cnt;
-}
-
-static inline const struct scoutfs_item_count SIC_DIRTY_INODE(void)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_dirty_inode(&cnt);
-
-    return cnt;
-}
-
-/*
- * Directory entries are stored in three items.
- */
-static inline void __count_dirents(struct scoutfs_item_count *cnt,
-                                   unsigned name_len)
-{
-    cnt->items += 3;
-    cnt->vals += 3 * offsetof(struct scoutfs_dirent, name[name_len]);
-}
-
-static inline void __count_sym_target(struct scoutfs_item_count *cnt,
-                                      unsigned size)
-{
-    unsigned nr = DIV_ROUND_UP(size, SCOUTFS_MAX_VAL_SIZE);
-
-    cnt->items += nr;
-    cnt->vals += size;
-}
-
-static inline void __count_orphan(struct scoutfs_item_count *cnt)
-{
-    cnt->items += 1;
-}
-
-static inline void __count_mknod(struct scoutfs_item_count *cnt,
-                                 unsigned name_len)
-{
-    __count_alloc_inode(cnt);
-    __count_dirents(cnt, name_len);
-    __count_dirty_inode(cnt);
-}
-
-static inline const struct scoutfs_item_count SIC_MKNOD(unsigned name_len)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_mknod(&cnt, name_len);
-
-    return cnt;
-}
-
-/*
- * Dropping the inode deletes all its items. Potentially enormous numbers
- * of items (data mapping, xattrs) are deleted in their own transactions.
- */
-static inline const struct scoutfs_item_count SIC_DROP_INODE(int mode,
-                                                             u64 size)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    if (S_ISLNK(mode))
-        __count_sym_target(&cnt, size);
-    __count_dirty_inode(&cnt);
-    __count_orphan(&cnt);
-
-    cnt.vals = 0;
-    return cnt;
-}
-
-static inline const struct scoutfs_item_count SIC_LINK(unsigned name_len)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_dirents(&cnt, name_len);
-    __count_dirty_inode(&cnt);
-    __count_dirty_inode(&cnt);
-
-    return cnt;
-}
-
-/*
- * Unlink can add orphan items.
- */
-static inline const struct scoutfs_item_count SIC_UNLINK(unsigned name_len)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_dirents(&cnt, name_len);
-    __count_dirty_inode(&cnt);
-    __count_dirty_inode(&cnt);
-    __count_orphan(&cnt);
-
-    return cnt;
-}
-
-static inline const struct scoutfs_item_count SIC_SYMLINK(unsigned name_len,
-                                                          unsigned size)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_mknod(&cnt, name_len);
-    __count_sym_target(&cnt, size);
-
-    return cnt;
-}
-
-/*
- * This assumes the worst case of a rename between directories that
- * unlinks an existing target. That'll be worse than the common case
- * by a few hundred bytes.
- */
-static inline const struct scoutfs_item_count SIC_RENAME(unsigned old_len,
-                                                         unsigned new_len)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    /* dirty dirs and inodes */
-    __count_dirty_inode(&cnt);
-    __count_dirty_inode(&cnt);
-    __count_dirty_inode(&cnt);
-    __count_dirty_inode(&cnt);
-
-    /* unlink old and new, link new */
-    __count_dirents(&cnt, old_len);
-    __count_dirents(&cnt, new_len);
-    __count_dirents(&cnt, new_len);
-
-    /* orphan the existing target */
-    __count_orphan(&cnt);
-
-    return cnt;
-}
-
-/*
- * Creating an xattr results in a dirty set of items with values that
- * store the xattr header, name, and value. There's always at least one
- * item with the header and name. Any previously existing items are
- * deleted which dirties their key but removes their value. The two
- * sets of items are indexed by different ids so their items don't
- * overlap.
- */
-static inline const struct scoutfs_item_count SIC_XATTR_SET(unsigned old_parts,
-                                                            bool creating,
-                                                            unsigned name_len,
-                                                            unsigned size)
-{
-    struct scoutfs_item_count cnt = {0,};
-    unsigned int new_parts;
-
-    __count_dirty_inode(&cnt);
-
-    if (old_parts)
-        cnt.items += old_parts;
-
-    if (creating) {
-        new_parts = SCOUTFS_XATTR_NR_PARTS(name_len, size);
-
-        cnt.items += new_parts;
-        cnt.vals += sizeof(struct scoutfs_xattr) + name_len + size;
-    }
-
-    return cnt;
-}
-
-/*
- * write_begin can have to allocate all the blocks in the page and can
- * have to add a big allocation from the server to do so:
- *  - merge added free extents from the server
- *  - remove a free extent per block
- *  - remove an offline extent for every other block
- *  - add a file extent per block
- */
-static inline const struct scoutfs_item_count SIC_WRITE_BEGIN(void)
-{
-    struct scoutfs_item_count cnt = {0,};
-    unsigned nr_free = (1 + SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
-    unsigned nr_file = (DIV_ROUND_UP(SCOUTFS_BLOCK_SM_PER_PAGE, 2) +
-                        SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
-
-    __count_dirty_inode(&cnt);
-
-    cnt.items += nr_free + nr_file;
-    cnt.vals += nr_file;
-
-    return cnt;
-}
-
-/*
- * Truncating an extent can:
- *  - delete existing file extent,
- *  - create two surrounding file extents,
- *  - add an offline file extent,
- *  - delete two existing free extents
- *  - create a merged free extent
- */
-static inline const struct scoutfs_item_count
-SIC_TRUNC_EXTENT(struct inode *inode)
-{
-    struct scoutfs_item_count cnt = {0,};
-    unsigned int nr_file = 1 + 2 + 1;
-    unsigned int nr_free = (2 + 1) * 2;
-
-    if (inode)
-        __count_dirty_inode(&cnt);
-
-    cnt.items += nr_file + nr_free;
-    cnt.vals += nr_file;
-
-    return cnt;
-}
-
-/*
- * Fallocating an extent can, at most:
- *  - allocate from the server: delete two free and insert merged
- *  - free an allocated extent: delete one and create two split
- *  - remove an unallocated file extent: delete one and create two split
- *  - add an fallocated flie extent: delete two and inset one merged
- */
-static inline const struct scoutfs_item_count SIC_FALLOCATE_ONE(void)
-{
-    struct scoutfs_item_count cnt = {0,};
-    unsigned int nr_free = ((1 + 2) * 2) * 2;
-    unsigned int nr_file = (1 + 2) * 2;
-
-    __count_dirty_inode(&cnt);
-
-    cnt.items += nr_free + nr_file;
-    cnt.vals += nr_file;
-
-    return cnt;
-}
-
-/*
- * ioc_setattr_more can dirty the inode and add a single offline extent.
- */
-static inline const struct scoutfs_item_count SIC_SETATTR_MORE(void)
-{
-    struct scoutfs_item_count cnt = {0,};
-
-    __count_dirty_inode(&cnt);
-
-    cnt.items++;
-
-    return cnt;
-}
-
-#endif
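A note for readers of the deletion above: these SIC_ helpers were the "clean rhs arg to hold_trans" the header comment describes. The calling-convention change is visible in the data.c and dir.c hunks below, roughly:

```c
/* before: callers passed a worst-case item-count estimate into the hold */
ret = scoutfs_hold_trans(sb, SIC_MKNOD(dentry->d_name.len));

/* after: no estimate; callers instead watch for -ENOBUFS and retry in a new txn */
ret = scoutfs_hold_trans(sb);
```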
@@ -20,17 +20,21 @@
 	EXPAND_COUNTER(alloc_list_freed_hi) \
 	EXPAND_COUNTER(alloc_move) \
 	EXPAND_COUNTER(alloc_moved_extent) \
-	EXPAND_COUNTER(alloc_stale_cached_list_block) \
-	EXPAND_COUNTER(block_cache_access) \
+	EXPAND_COUNTER(alloc_stale_list_block) \
+	EXPAND_COUNTER(block_cache_access_update) \
 	EXPAND_COUNTER(block_cache_alloc_failure) \
 	EXPAND_COUNTER(block_cache_alloc_page_order) \
 	EXPAND_COUNTER(block_cache_alloc_virt) \
 	EXPAND_COUNTER(block_cache_end_io_error) \
 	EXPAND_COUNTER(block_cache_forget) \
 	EXPAND_COUNTER(block_cache_free) \
-	EXPAND_COUNTER(block_cache_invalidate) \
-	EXPAND_COUNTER(block_cache_lru_move) \
+	EXPAND_COUNTER(block_cache_free_work) \
+	EXPAND_COUNTER(block_cache_remove_stale) \
 	EXPAND_COUNTER(block_cache_shrink) \
+	EXPAND_COUNTER(block_cache_shrink_next) \
+	EXPAND_COUNTER(block_cache_shrink_recent) \
+	EXPAND_COUNTER(block_cache_shrink_remove) \
+	EXPAND_COUNTER(block_cache_shrink_restart) \
 	EXPAND_COUNTER(btree_compact_values) \
 	EXPAND_COUNTER(btree_compact_values_enomem) \
 	EXPAND_COUNTER(btree_delete) \
@@ -42,7 +46,6 @@
 	EXPAND_COUNTER(btree_lookup) \
 	EXPAND_COUNTER(btree_next) \
 	EXPAND_COUNTER(btree_prev) \
-	EXPAND_COUNTER(btree_read_error) \
 	EXPAND_COUNTER(btree_split) \
 	EXPAND_COUNTER(btree_stale_read) \
 	EXPAND_COUNTER(btree_update) \
@@ -58,6 +61,8 @@
 	EXPAND_COUNTER(corrupt_symlink_inode_size) \
 	EXPAND_COUNTER(corrupt_symlink_missing_item) \
 	EXPAND_COUNTER(corrupt_symlink_not_null_term) \
+	EXPAND_COUNTER(data_fallocate_enobufs_retry) \
+	EXPAND_COUNTER(data_write_begin_enobufs_retry) \
 	EXPAND_COUNTER(dentry_revalidate_error) \
 	EXPAND_COUNTER(dentry_revalidate_invalid) \
 	EXPAND_COUNTER(dentry_revalidate_locked) \
@@ -71,6 +76,7 @@
 	EXPAND_COUNTER(ext_op_remove) \
 	EXPAND_COUNTER(forest_bloom_fail) \
 	EXPAND_COUNTER(forest_bloom_pass) \
+	EXPAND_COUNTER(forest_bloom_stale) \
 	EXPAND_COUNTER(forest_read_items) \
 	EXPAND_COUNTER(forest_roots_next_hint) \
 	EXPAND_COUNTER(forest_set_bloom_bits) \
@@ -137,18 +143,21 @@
 	EXPAND_COUNTER(net_recv_invalid_message) \
 	EXPAND_COUNTER(net_recv_messages) \
 	EXPAND_COUNTER(net_unknown_request) \
-	EXPAND_COUNTER(quorum_cycle) \
-	EXPAND_COUNTER(quorum_elected_leader) \
-	EXPAND_COUNTER(quorum_election_timeout) \
-	EXPAND_COUNTER(quorum_failure) \
-	EXPAND_COUNTER(quorum_read_block) \
-	EXPAND_COUNTER(quorum_read_block_error) \
+	EXPAND_COUNTER(quorum_elected) \
+	EXPAND_COUNTER(quorum_fence_error) \
+	EXPAND_COUNTER(quorum_fence_leader) \
+	EXPAND_COUNTER(quorum_read_invalid_block) \
-	EXPAND_COUNTER(quorum_saw_super_leader) \
-	EXPAND_COUNTER(quorum_timedout) \
-	EXPAND_COUNTER(quorum_write_block) \
-	EXPAND_COUNTER(quorum_write_block_error) \
+	EXPAND_COUNTER(quorum_fenced) \
+	EXPAND_COUNTER(quorum_recv_error) \
+	EXPAND_COUNTER(quorum_recv_heartbeat) \
+	EXPAND_COUNTER(quorum_recv_invalid) \
+	EXPAND_COUNTER(quorum_recv_resignation) \
+	EXPAND_COUNTER(quorum_recv_vote) \
+	EXPAND_COUNTER(quorum_send_heartbeat) \
+	EXPAND_COUNTER(quorum_send_resignation) \
+	EXPAND_COUNTER(quorum_send_request) \
+	EXPAND_COUNTER(quorum_send_vote) \
+	EXPAND_COUNTER(quorum_server_shutdown) \
+	EXPAND_COUNTER(quorum_term_follower) \
 	EXPAND_COUNTER(server_commit_hold) \
 	EXPAND_COUNTER(server_commit_queue) \
 	EXPAND_COUNTER(server_commit_worker) \
@@ -158,7 +167,6 @@
 	EXPAND_COUNTER(srch_compact_flush) \
 	EXPAND_COUNTER(srch_compact_log_page) \
 	EXPAND_COUNTER(srch_compact_removed_entry) \
-	EXPAND_COUNTER(srch_inconsistent_ref) \
 	EXPAND_COUNTER(srch_rotate_log) \
 	EXPAND_COUNTER(srch_search_log) \
 	EXPAND_COUNTER(srch_search_log_block) \
@@ -37,7 +37,6 @@
 #include "lock.h"
 #include "file.h"
 #include "msg.h"
-#include "count.h"
 #include "ext.h"
 #include "util.h"
 
@@ -291,7 +290,6 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
                                 u64 ino, u64 iblock, u64 last, bool offline,
                                 struct scoutfs_lock *lock)
 {
-    struct scoutfs_item_count cnt = SIC_TRUNC_EXTENT(inode);
     struct scoutfs_inode_info *si = NULL;
     LIST_HEAD(ind_locks);
     s64 ret = 0;
@@ -315,9 +313,9 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
     while (iblock <= last) {
         if (inode)
             ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
-                                                true, cnt);
+                                                true);
         else
-            ret = scoutfs_hold_trans(sb, cnt);
+            ret = scoutfs_hold_trans(sb);
         if (ret)
             break;
 
@@ -753,13 +751,13 @@ static int scoutfs_write_begin(struct file *file,
         goto out;
     }
 
+retry:
     do {
         ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
               scoutfs_inode_index_prepare(sb, &wbd->ind_locks, inode,
                                           true) ?:
               scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks,
-                                                ind_seq,
-                                                SIC_WRITE_BEGIN());
+                                                ind_seq);
     } while (ret > 0);
     if (ret < 0)
         goto out;
@@ -768,17 +766,22 @@ static int scoutfs_write_begin(struct file *file,
     flags |= AOP_FLAG_NOFS;
 
     /* generic write_end updates i_size and calls dirty_inode */
-    ret = scoutfs_dirty_inode_item(inode, wbd->lock);
-    if (ret == 0)
-        ret = block_write_begin(mapping, pos, len, flags, pagep,
-                                scoutfs_get_block_write);
-    if (ret)
+    ret = scoutfs_dirty_inode_item(inode, wbd->lock) ?:
+          block_write_begin(mapping, pos, len, flags, pagep,
+                            scoutfs_get_block_write);
+    if (ret < 0) {
         scoutfs_release_trans(sb);
-out:
-    if (ret) {
         scoutfs_inode_index_unlock(sb, &wbd->ind_locks);
-        kfree(wbd);
+        if (ret == -ENOBUFS) {
+            /* Retry with a new transaction. */
+            scoutfs_inc_counter(sb, data_write_begin_enobufs_retry);
+            goto retry;
+        }
     }
 
+out:
+    if (ret < 0)
+        kfree(wbd);
     return ret;
 }
 
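The shape of this new retry is worth calling out, since fallocate below repeats it: hold a transaction, attempt the operation, and if the allocator reports the transaction is out of space (-ENOBUFS, per the alloc.c change earlier on this page), release everything and go around again on a fresh transaction. Distilled into a sketch with hypothetical helper names:

```c
/* illustrative only: the retry shape shared by write_begin and fallocate */
static int hold_and_retry_on_enobufs(struct super_block *sb)
{
    int ret;

    do {
        ret = hold_trans_and_locks(sb);    /* hypothetical setup helper */
        if (ret < 0)
            break;
        ret = do_the_operation(sb);        /* may return -ENOBUFS: txn full */
        release_trans_and_locks(sb);       /* hypothetical teardown helper */
    } while (ret == -ENOBUFS);             /* a fresh txn gets fresh alloc lists */

    return ret;
}
```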
@@ -1007,8 +1010,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
     while(iblock <= last) {
 
-        ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
-                                            SIC_FALLOCATE_ONE());
+        ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
         if (ret)
             goto out;
 
@@ -1026,6 +1028,12 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
         scoutfs_release_trans(sb);
         scoutfs_inode_index_unlock(sb, &ind_locks);
 
+        /* txn couldn't meet the request. Let's try with a new txn */
+        if (ret == -ENOBUFS) {
+            scoutfs_inc_counter(sb, data_fallocate_enobufs_retry);
+            continue;
+        }
+
         if (ret <= 0)
             goto out;
 
@@ -1078,8 +1086,7 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
     }
 
     /* we're updating meta_seq with offline block count */
-    ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
-                                        SIC_SETATTR_MORE());
+    ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
     if (ret < 0)
         goto out;
 
@@ -1224,8 +1231,7 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
         ret = scoutfs_inode_index_start(sb, &seq) ?:
               scoutfs_inode_index_prepare(sb, &locks, from, true) ?:
               scoutfs_inode_index_prepare(sb, &locks, to, true) ?:
-              scoutfs_inode_index_try_lock_hold(sb, &locks, seq,
-                                                SIC_EXACT(1, 1));
+              scoutfs_inode_index_try_lock_hold(sb, &locks, seq);
         if (ret > 0)
             continue;
         if (ret < 0)
@@ -463,7 +463,18 @@ out:
     else
         inode = scoutfs_iget(sb, ino);
 
-    return d_splice_alias(inode, dentry);
+    /*
+     * We can't splice dir aliases into the dcache. dir entries
+     * might have changed on other nodes so our dcache could still
+     * contain them, rather than having been moved in rename. For
+     * dirs, we use d_materialize_unique to remove any existing
+     * aliases which must be stale. Our inode numbers aren't reused
+     * so inodes pointed to by entries can't change types.
+     */
+    if (!IS_ERR_OR_NULL(inode) && S_ISDIR(inode->i_mode))
+        return d_materialise_unique(dentry, inode);
+    else
+        return d_splice_alias(inode, dentry);
 }
 
 /*
@@ -655,7 +666,6 @@ static int del_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
  */
 static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
                                       umode_t mode, dev_t rdev,
-                                      const struct scoutfs_item_count cnt,
                                       struct scoutfs_lock **dir_lock,
                                       struct scoutfs_lock **inode_lock,
                                       struct list_head *ind_locks)
@@ -694,7 +704,7 @@ retry:
     ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
           scoutfs_inode_index_prepare(sb, ind_locks, dir, true) ?:
           scoutfs_inode_index_prepare_ino(sb, ind_locks, ino, mode) ?:
-          scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq, cnt);
+          scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq);
     if (ret > 0)
         goto retry;
     if (ret)
@@ -741,7 +751,6 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 
     hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);
     inode = lock_hold_create(dir, dentry, mode, rdev,
-                             SIC_MKNOD(dentry->d_name.len),
                              &dir_lock, &inode_lock, &ind_locks);
     if (IS_ERR(inode))
         return PTR_ERR(inode);
@@ -836,8 +845,7 @@ retry:
     ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
           scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
           scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
-          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
-                                            SIC_LINK(dentry->d_name.len));
+          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
     if (ret > 0)
         goto retry;
     if (ret)
@@ -918,8 +926,7 @@ retry:
     ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
           scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
           scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
-          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
-                                            SIC_UNLINK(dentry->d_name.len));
+          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
     if (ret > 0)
         goto retry;
     if (ret)
@@ -1154,7 +1161,6 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
         return ret;
 
     inode = lock_hold_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0,
-                             SIC_SYMLINK(dentry->d_name.len, name_len),
                              &dir_lock, &inode_lock, &ind_locks);
     if (IS_ERR(inode))
         return PTR_ERR(inode);
@@ -1586,9 +1592,7 @@ retry:
           scoutfs_inode_index_prepare(sb, &ind_locks, new_dir, false)) ?:
           (new_inode == NULL ? 0 :
            scoutfs_inode_index_prepare(sb, &ind_locks, new_inode, false)) ?:
-          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
-                                            SIC_RENAME(old_dentry->d_name.len,
-                                                       new_dentry->d_name.len));
+          scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
     if (ret > 0)
         goto retry;
     if (ret)
@@ -66,8 +66,8 @@ struct forest_info {
 	struct forest_info *name = SCOUTFS_SB(sb)->forest_info

 struct forest_refs {
-	struct scoutfs_btree_ref fs_ref;
-	struct scoutfs_btree_ref logs_ref;
+	struct scoutfs_block_ref fs_ref;
+	struct scoutfs_block_ref logs_ref;
 };

 /* initialize some refs that initially aren't equal */
@@ -96,20 +96,16 @@ static void calc_bloom_nrs(struct forest_bloom_nrs *bloom,
 	}
 }

-static struct scoutfs_block *read_bloom_ref(struct super_block *sb,
-					    struct scoutfs_btree_ref *ref)
+static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scoutfs_block_ref *ref)
 {
 	struct scoutfs_block *bl;
+	int ret;

-	bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
-	if (IS_ERR(bl))
-		return bl;
-
-	if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
-					  SCOUTFS_BLOCK_MAGIC_BLOOM)) {
-		scoutfs_block_invalidate(sb, bl);
-		scoutfs_block_put(sb, bl);
-		return ERR_PTR(-ESTALE);
+	ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BLOOM, &bl);
+	if (ret < 0) {
+		if (ret == -ESTALE)
+			scoutfs_inc_counter(sb, forest_bloom_stale);
+		bl = ERR_PTR(ret);
 	}

 	return bl;
@@ -381,18 +377,14 @@ out:
 int scoutfs_forest_set_bloom_bits(struct super_block *sb,
 				  struct scoutfs_lock *lock)
 {
-	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
 	DECLARE_FOREST_INFO(sb, finf);
-	struct scoutfs_block *new_bl = NULL;
 	struct scoutfs_block *bl = NULL;
 	struct scoutfs_bloom_block *bb;
-	struct scoutfs_btree_ref *ref;
+	struct scoutfs_block_ref *ref;
 	struct forest_bloom_nrs bloom;
 	int nr_set = 0;
-	u64 blkno;
 	u64 nr;
 	int ret;
-	int err;
 	int i;

 	nr = le64_to_cpu(finf->our_log.nr);
@@ -410,53 +402,11 @@ int scoutfs_forest_set_bloom_bits(struct super_block *sb,

 	ref = &finf->our_log.bloom_ref;

-	if (ref->blkno) {
-		bl = read_bloom_ref(sb, ref);
-		if (IS_ERR(bl)) {
-			ret = PTR_ERR(bl);
-			goto unlock;
-		}
-		bb = bl->data;
-	}
-
-	if (!ref->blkno || !scoutfs_block_writer_is_dirty(sb, bl)) {
-
-		ret = scoutfs_alloc_meta(sb, finf->alloc, finf->wri, &blkno);
-		if (ret < 0)
-			goto unlock;
-
-		new_bl = scoutfs_block_create(sb, blkno);
-		if (IS_ERR(new_bl)) {
-			err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
-						blkno);
-			BUG_ON(err); /* could have dirtied */
-			ret = PTR_ERR(new_bl);
-			goto unlock;
-		}
-
-		if (bl) {
-			err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
-						le64_to_cpu(ref->blkno));
-			BUG_ON(err); /* could have dirtied */
-			memcpy(new_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
-		} else {
-			memset(new_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
-		}
-
-		scoutfs_block_writer_mark_dirty(sb, finf->wri, new_bl);
-
-		scoutfs_block_put(sb, bl);
-		bl = new_bl;
-		bb = bl->data;
-		new_bl = NULL;
-
-		bb->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BLOOM);
-		bb->hdr.fsid = super->hdr.fsid;
-		bb->hdr.blkno = cpu_to_le64(blkno);
-		prandom_bytes(&bb->hdr.seq, sizeof(bb->hdr.seq));
-		ref->blkno = bb->hdr.blkno;
-		ref->seq = bb->hdr.seq;
-	}
+	ret = scoutfs_block_dirty_ref(sb, finf->alloc, finf->wri, ref, SCOUTFS_BLOCK_MAGIC_BLOOM,
+				      &bl, 0, NULL);
+	if (ret < 0)
+		goto unlock;
+	bb = bl->data;

 	for (i = 0; i < ARRAY_SIZE(bloom.nrs); i++) {
 		if (!test_and_set_bit_le(bloom.nrs[i], bb->bits)) {
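Two things are worth noting in this hunk. First, the bloom mechanics that survive the rewrite: calc_bloom_nrs() reduces a lock's key range to a small fixed set of bit positions (bloom.nrs[]), and test_and_set_bit_le() sets each one in the log tree's bloom block, so readers can presumably skip a log btree whose bloom block is missing any of a key's bits; a false positive only costs an extra search. Second, the open-coded read/allocate/copy/free/stamp sequence that used to cow the bloom block collapses into a single scoutfs_block_dirty_ref() call; judging only from the code it replaces, that helper reads the referenced block if one exists, cows it into a freshly allocated dirty block when it isn't already dirty in this transaction, and updates the caller's ref to the new header.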
@@ -1,6 +1,9 @@
 #ifndef _SCOUTFS_FORMAT_H_
 #define _SCOUTFS_FORMAT_H_

+#define SCOUTFS_INTEROP_VERSION		0ULL
+#define SCOUTFS_INTEROP_VERSION_STR	__stringify(0)
+
 /* statfs(2) f_type */
 #define SCOUTFS_SUPER_MAGIC	0x554f4353	/* "SCOU" */

@@ -11,6 +14,7 @@
 #define SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK	0x897e4a7d
 #define SCOUTFS_BLOCK_MAGIC_SRCH_PARENT	0xb23a2a05
 #define SCOUTFS_BLOCK_MAGIC_ALLOC_LIST	0x8a93ac83
+#define SCOUTFS_BLOCK_MAGIC_QUORUM	0xbc310868

 /*
  * The super block, quorum block, and file data allocation granularity
@@ -51,15 +55,19 @@
 #define SCOUTFS_SUPER_BLKNO ((64ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)

 /*
- * A reasonably large region of aligned quorum blocks follow the super
- * block.  Each voting cycle reads the entire region so we don't want it
- * to be too enormous.  256K seems like a reasonably chunky single IO.
- * The number of blocks in the region also determines the number of
- * mounts that have a reasonable probability of not overwriting each
- * other's random block locations.
+ * A small number of quorum blocks follow the super block, enough of
+ * them to match the starting offset of the super block so the region is
+ * aligned to the power of two that contains it.
  */
-#define SCOUTFS_QUORUM_BLKNO ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
-#define SCOUTFS_QUORUM_BLOCKS ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
+#define SCOUTFS_QUORUM_BLKNO (SCOUTFS_SUPER_BLKNO + 1)
+#define SCOUTFS_QUORUM_BLOCKS (SCOUTFS_SUPER_BLKNO - 1)

 /*
  * Free metadata blocks start after the quorum blocks
  */
 #define SCOUTFS_META_DEV_START_BLKNO \
 	((SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >> \
 	 SCOUTFS_BLOCK_SM_LG_SHIFT)
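For concreteness, here is roughly how the new layout works out, assuming 4KB small blocks (SCOUTFS_BLOCK_SM_SHIFT == 12; the shift value itself isn't visible in this hunk). The super block lands at small block 16 (64KB / 4KB), the quorum blocks occupy 17 through 31, and together they end exactly at block 32, the next aligned power-of-two boundary the new comment describes. A throwaway check of that arithmetic, not code from the patch:

	#include <stdio.h>

	#define SM_SHIFT	12				/* assumed: 4KB small blocks */
	#define SUPER_BLKNO	((64ULL * 1024) >> SM_SHIFT)	/* 16 */
	#define QUORUM_BLKNO	(SUPER_BLKNO + 1)		/* 17 */
	#define QUORUM_BLOCKS	(SUPER_BLKNO - 1)		/* 15 */

	int main(void)
	{
		/* super + quorum blocks end at 2 * SUPER_BLKNO, an aligned boundary */
		printf("super %llu, quorum %llu..%llu, region ends at %llu\n",
		       SUPER_BLKNO, QUORUM_BLKNO,
		       QUORUM_BLKNO + QUORUM_BLOCKS - 1,
		       QUORUM_BLKNO + QUORUM_BLOCKS);
		return 0;
	}

Under that assumption SCOUTFS_QUORUM_BLOCKS works out to 15, matching the SCOUTFS_QUORUM_MAX_SLOTS of 15 that appears later in this header, which suggests each quorum slot owns exactly one block.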
 /*
  * Start data on the data device aligned as well.
@@ -98,6 +106,15 @@ struct scoutfs_block_header {
 	__le64 blkno;
 };

+/*
+ * A reference to a block.  The corresponding fields in the block_header
+ * must match after having read the block contents.
+ */
+struct scoutfs_block_ref {
+	__le64 blkno;
+	__le64 seq;
+};
+
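The btree, alloc list, and srch code each used to declare this same { blkno, seq } pair under a private name; the hunks below collapse them all onto this one struct. The check the comment describes is the one the forest.c hunk above moved into scoutfs_block_read_ref(): after the block at ref->blkno is read, its header must echo the ref. A sketch of that comparison against the structs in this header (the helper itself is illustrative, not from the patch):

	/* illustrative only: a ref is consistent when the header of the
	 * block it points at carries the same identity */
	static bool block_matches_ref(const struct scoutfs_block_header *hdr,
				      const struct scoutfs_block_ref *ref,
				      __le32 magic)
	{
		return hdr->magic == magic &&
		       hdr->blkno == ref->blkno &&
		       hdr->seq == ref->seq;
	}

Because a cowed block always gets a new blkno and a fresh random seq (visible in the removed forest.c code above), a stale cached ref fails this comparison and surfaces as -ESTALE instead of silently reading reused contents.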
 /*
  * scoutfs identifies all file system metadata items by a small key
  * struct.
@@ -173,19 +190,6 @@ struct scoutfs_key {
 #define skfl_neglen	_sk_second
 #define skfl_blkno	_sk_third

-struct scoutfs_radix_block {
-	struct scoutfs_block_header hdr;
-	union {
-		struct scoutfs_radix_ref {
-			__le64 blkno;
-			__le64 seq;
-			__le64 sm_total;
-			__le64 lg_total;
-		} refs[0];
-		__le64 bits[0];
-	};
-};
-
 struct scoutfs_avl_root {
 	__le16 node;
 };
@@ -207,17 +211,12 @@ struct scoutfs_avl_node {
  */
 #define SCOUTFS_BTREE_MAX_HEIGHT 20

-struct scoutfs_btree_ref {
-	__le64 blkno;
-	__le64 seq;
-};
-
 /*
  * A height of X means that the first block read will have level X-1 and
  * the leaves will have level 0.
  */
 struct scoutfs_btree_root {
-	struct scoutfs_btree_ref ref;
+	struct scoutfs_block_ref ref;
 	__u8 height;
 	__u8 __pad[7];
 };
@@ -258,18 +257,13 @@ struct scoutfs_btree_block {
 #define SCOUTFS_BTREE_LEAF_ITEM_HASH_BYTES \
 	(SCOUTFS_BTREE_LEAF_ITEM_HASH_NR * sizeof(__le16))

-struct scoutfs_alloc_list_ref {
-	__le64 blkno;
-	__le64 seq;
-};
-
 /*
  * first_nr tracks the nr of the first block in the list and is used for
  * allocation sizing.  total_nr is the sum of the nr of all the blocks in
  * the list and is used for calculating total free block counts.
  */
 struct scoutfs_alloc_list_head {
-	struct scoutfs_alloc_list_ref ref;
+	struct scoutfs_block_ref ref;
 	__le64 total_nr;
 	__le32 first_nr;
 	__u8 __pad[4];
@@ -288,7 +282,7 @@ struct scoutfs_alloc_list_head {
  */
 struct scoutfs_alloc_list_block {
 	struct scoutfs_block_header hdr;
-	struct scoutfs_alloc_list_ref next;
+	struct scoutfs_block_ref next;
 	__le32 start;
 	__le32 nr;
 	__le64 blknos[0]; /* naturally aligned for sorting */
@@ -316,7 +310,7 @@ struct scoutfs_mounted_client_btree_val {
 	__u8 flags;
 };

-#define SCOUTFS_MOUNTED_CLIENT_VOTER	(1 << 0)
+#define SCOUTFS_MOUNTED_CLIENT_QUORUM	(1 << 0)

 /*
  * srch files are a contiguous run of blocks with compressed entries
@@ -334,15 +328,10 @@ struct scoutfs_srch_entry {

 #define SCOUTFS_SRCH_ENTRY_MAX_BYTES (2 + (sizeof(__u64) * 3))

-struct scoutfs_srch_ref {
-	__le64 blkno;
-	__le64 seq;
-};
-
 struct scoutfs_srch_file {
 	struct scoutfs_srch_entry first;
 	struct scoutfs_srch_entry last;
-	struct scoutfs_srch_ref ref;
+	struct scoutfs_block_ref ref;
 	__le64 blocks;
 	__le64 entries;
 	__u8 height;
@@ -351,13 +340,13 @@ struct scoutfs_srch_file {

 struct scoutfs_srch_parent {
 	struct scoutfs_block_header hdr;
-	struct scoutfs_srch_ref refs[0];
+	struct scoutfs_block_ref refs[0];
 };

 #define SCOUTFS_SRCH_PARENT_REFS				\
 	((SCOUTFS_BLOCK_LG_SIZE -				\
 	  offsetof(struct scoutfs_srch_parent, refs)) /		\
-	 sizeof(struct scoutfs_srch_ref))
+	 sizeof(struct scoutfs_block_ref))

 struct scoutfs_srch_block {
 	struct scoutfs_block_header hdr;
@@ -428,7 +417,7 @@ struct scoutfs_log_trees {
 	struct scoutfs_alloc_list_head meta_avail;
 	struct scoutfs_alloc_list_head meta_freed;
 	struct scoutfs_btree_root item_root;
-	struct scoutfs_btree_ref bloom_ref;
+	struct scoutfs_block_ref bloom_ref;
 	struct scoutfs_alloc_root data_avail;
 	struct scoutfs_alloc_root data_freed;
 	struct scoutfs_srch_file srch_file;
@@ -547,56 +536,84 @@ struct scoutfs_xattr {

 #define SCOUTFS_UUID_BYTES 16

-/*
- * Mounts read all the quorum blocks and write to one random quorum
- * block during a cycle.  The min cycle time limits the per-mount iop
- * load during elections.  The random cycle delay makes it less likely
- * that mounts will read and write at the same time and miss each
- * other's writes.  An election only completes if a quorum of mounts
- * vote for a leader before any of their elections timeout.  This is
- * made less likely by the probability that mounts will overwrite each
- * others random block locations.  The max quorum count limits that
- * probability.  9 mounts only have a 55% chance of writing to unique 4k
- * blocks in a 256k region.  The election timeout is set to include
- * enough cycles to usually complete the election.  Once a leader is
- * elected it spends a number of cycles writing out blocks with itself
- * logged as a leader.  This reduces the possibility that servers
- * will have their log entries overwritten and not be fenced.
- */
-#define SCOUTFS_QUORUM_MAX_COUNT		9
-#define SCOUTFS_QUORUM_CYCLE_LO_MS		10
-#define SCOUTFS_QUORUM_CYCLE_HI_MS		20
-#define SCOUTFS_QUORUM_TERM_LO_MS		250
-#define SCOUTFS_QUORUM_TERM_HI_MS		500
-#define SCOUTFS_QUORUM_ELECTED_LOG_CYCLES	10
+#define SCOUTFS_QUORUM_MAX_SLOTS	15
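The 55% figure in the removed comment is just the birthday problem: a 256KB region holds 64 4KB blocks, and the chance that 9 mounts independently pick 9 distinct blocks is 64/64 x 63/64 x ... x 56/64, roughly 0.555. A throwaway check of that arithmetic, not code from the patch:

	#include <stdio.h>

	int main(void)
	{
		int blocks = 64;	/* 256KB region / 4KB blocks */
		int mounts = 9;		/* the old SCOUTFS_QUORUM_MAX_COUNT */
		double p = 1.0;
		int i;

		for (i = 0; i < mounts; i++)
			p *= (double)(blocks - i) / blocks;

		printf("p(all %d mounts pick unique blocks) = %.3f\n", mounts, p);
		return 0;
	}

The message-based protocol introduced below removes that collision risk entirely, which is presumably why the new design can raise the member limit from 9 to the 15 slots above.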
-struct scoutfs_quorum_block {
+/*
+ * To elect a leader, members race to have their variable election
+ * timeouts expire.  If they're first to send a vote request with a
+ * greater term to a majority of waiting members they'll be elected with
+ * a majority.  If the timeouts are too close, the vote may be split and
+ * everyone will wait for another cycle of variable timeouts to expire.
+ *
+ * These determine how long it will take to elect a leader once there's
+ * no evidence of a server (no leader quorum blocks on mount; heartbeat
+ * timeout expired.)
+ */
+#define SCOUTFS_QUORUM_ELECT_MIN_MS	250
+#define SCOUTFS_QUORUM_ELECT_VAR_MS	100
+
+/*
+ * Once a leader is elected they send out heartbeats at regular
+ * intervals to force members to wait the much longer heartbeat timeout.
+ * Once heartbeat timeout expires without receiving a heartbeat they'll
+ * switch over the performing elections.
+ *
+ * These determine how long it could take members to notice that a
+ * leader has gone silent and start to elect a new leader.
+ */
+#define SCOUTFS_QUORUM_HB_IVAL_MS	100
+#define SCOUTFS_QUORUM_HB_TIMEO_MS	(5 * MSEC_PER_SEC)
+
+struct scoutfs_quorum_message {
+	__le64 fsid;
-	__le64 blkno;
+	__le64 version;
 	__le64 term;
-	__le64 write_nr;
 	__le64 voter_rid;
 	__le64 vote_for_rid;
+	__u8 type;
+	__u8 from;
+	__u8 __pad[2];
 	__le32 crc;
-	__u8 log_nr;
-	__u8 __pad[3];
-	struct scoutfs_quorum_log {
-		__le64 term;
-		__le64 rid;
-		struct scoutfs_inet_addr addr;
-	} log[0];
 };

-#define SCOUTFS_QUORUM_LOG_MAX						\
-	((SCOUTFS_BLOCK_SM_SIZE - sizeof(struct scoutfs_quorum_block)) / \
-	 sizeof(struct scoutfs_quorum_log))
+/* a candidate requests a vote */
+#define SCOUTFS_QUORUM_MSG_REQUEST_VOTE	0
+/* followers send votes to candidates */
+#define SCOUTFS_QUORUM_MSG_VOTE		1
+/* elected leaders broadcast heartbeats to delay elections */
+#define SCOUTFS_QUORUM_MSG_HEARTBEAT	2
+/* leaders broadcast as they leave to break heartbeat timeout */
+#define SCOUTFS_QUORUM_MSG_RESIGNATION	3
+#define SCOUTFS_QUORUM_MSG_INVALID	4
+
+/*
+ * The version is currently always 0, but will be used by mounts to
+ * discover that membership has changed.
+ */
+struct scoutfs_quorum_config {
+	__le64 version;
+	struct scoutfs_quorum_slot {
+		struct scoutfs_inet_addr addr;
+	} slots[SCOUTFS_QUORUM_MAX_SLOTS];
+};
+
+struct scoutfs_quorum_block {
+	struct scoutfs_block_header hdr;
+	__le64 term;
+	__le64 random_write_mark;
+	__le64 flags;
+	struct scoutfs_quorum_block_event {
+		__le64 rid;
+		struct scoutfs_timespec ts;
+	} write, update_term, set_leader, clear_leader, fenced;
+};
+
+#define SCOUTFS_QUORUM_BLOCK_LEADER	(1 << 0)
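The two ELECT constants above imply the usual randomized-timeout trick: every member waits a fixed minimum plus a random slice, so one member usually times out first, requests votes, and wins before the others wake up. A sketch of how a member might pick its timeout (illustrative; only the two constants come from this header, which declares no such helper):

	/* illustrative helper, not from the patch: a timeout in
	 * [ELECT_MIN_MS, ELECT_MIN_MS + ELECT_VAR_MS) */
	static unsigned int quorum_elect_timeout_ms(void)
	{
		return SCOUTFS_QUORUM_ELECT_MIN_MS +
		       (prandom_u32() % SCOUTFS_QUORUM_ELECT_VAR_MS);
	}

Split votes remain possible when two timeouts land within a network round trip of each other, which is what the comment's wait for another cycle of timeouts covers.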
 #define SCOUTFS_FLAG_IS_META_BDEV 0x01

 struct scoutfs_super_block {
 	struct scoutfs_block_header hdr;
 	__le64 id;
-	__le64 format_hash;
+	__le64 version;
 	__le64 flags;
 	__u8 uuid[SCOUTFS_UUID_BYTES];
 	__le64 next_ino;
@@ -607,12 +624,7 @@ struct scoutfs_super_block {
 	__le64 total_data_blocks;
 	__le64 first_data_blkno;
 	__le64 last_data_blkno;
-	__le64 quorum_fenced_term;
-	__le64 quorum_server_term;
-	__le64 unmount_barrier;
-	__u8 quorum_count;
-	__u8 __pad[7];
-	struct scoutfs_inet_addr server_addr;
+	struct scoutfs_quorum_config qconf;
 	struct scoutfs_alloc_root meta_alloc[2];
 	struct scoutfs_alloc_root data_alloc;
 	struct scoutfs_alloc_list_head server_meta_avail[2];
@@ -746,12 +758,6 @@ enum scoutfs_dentry_type {
  * the same serer after receiving a greeting response and to a new
  * server after failover.
  *
- * @unmount_barrier: Incremented every time the remaining majority of
- * quorum members all agree to leave.  The server tells a quorum member
- * the value that it's connecting under so that if the client sees the
- * value increase in the super block then it knows that the server has
- * processed its farewell and can safely unmount.
- *
  * @rid: The client's random id that was generated once as the mount
  * started up.  This identifies a specific remote mount across
  * connections and servers.  It's set to the client's rid in both the
@@ -759,15 +765,14 @@ enum scoutfs_dentry_type {
  */
 struct scoutfs_net_greeting {
 	__le64 fsid;
-	__le64 format_hash;
+	__le64 version;
 	__le64 server_term;
-	__le64 unmount_barrier;
 	__le64 rid;
 	__le64 flags;
 };

 #define SCOUTFS_NET_GREETING_FLAG_FAREWELL	(1 << 0)
-#define SCOUTFS_NET_GREETING_FLAG_VOTER		(1 << 1)
+#define SCOUTFS_NET_GREETING_FLAG_QUORUM	(1 << 1)
 #define SCOUTFS_NET_GREETING_FLAG_INVALID	(~(__u64)0 << 2)

 /*
@@ -343,8 +343,7 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
 	if (!S_ISREG(inode->i_mode))
 		return 0;

-	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true,
-					    SIC_DIRTY_INODE());
+	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true);
 	if (ret)
 		return ret;

@@ -371,8 +370,7 @@ static int clear_truncate_flag(struct inode *inode, struct scoutfs_lock *lock)
 	LIST_HEAD(ind_locks);
 	int ret;

-	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
-					    SIC_DIRTY_INODE());
+	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
 	if (ret)
 		return ret;

@@ -487,8 +485,7 @@ retry:
 		}
 	}

-	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
-					    SIC_DIRTY_INODE());
+	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
 	if (ret)
 		goto out;

@@ -1189,8 +1186,7 @@ int scoutfs_inode_index_start(struct super_block *sb, u64 *seq)
  * Returns > 0 if the seq changed and the locks should be retried.
  */
 int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
-				      struct list_head *list, u64 seq,
-				      const struct scoutfs_item_count cnt)
+				      struct list_head *list, u64 seq)
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 	struct index_lock *ind_lock;
@@ -1206,7 +1202,7 @@ int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
 		goto out;
 	}

-	ret = scoutfs_hold_trans(sb, cnt);
+	ret = scoutfs_hold_trans(sb);
 	if (ret == 0 && seq != sbi->trans_seq) {
 		scoutfs_release_trans(sb);
 		ret = 1;
@@ -1220,8 +1216,7 @@ out:
 }

 int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
-				  bool set_data_seq,
-				  const struct scoutfs_item_count cnt)
+				  bool set_data_seq)
 {
 	struct super_block *sb = inode->i_sb;
 	int ret;
@@ -1231,7 +1226,7 @@ int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
 		ret = scoutfs_inode_index_start(sb, &seq) ?:
 		      scoutfs_inode_index_prepare(sb, list, inode,
 						  set_data_seq) ?:
-		      scoutfs_inode_index_try_lock_hold(sb, list, seq, cnt);
+		      scoutfs_inode_index_try_lock_hold(sb, list, seq);
 	} while (ret > 0);

 	return ret;
@@ -1499,8 +1494,7 @@ static int delete_inode_items(struct super_block *sb, u64 ino)
 retry:
 	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
 	      prepare_index_deletion(sb, &ind_locks, ino, mode, &sinode) ?:
-	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
-						SIC_DROP_INODE(mode, size));
+	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
 	if (ret > 0)
 		goto retry;
 	if (ret)
@@ -1627,19 +1621,28 @@ int scoutfs_orphan_inode(struct inode *inode)
 }

 /*
- * Track an inode that could have dirty pages. Used to kick off writeback
- * on all dirty pages during transaction commit without tying ourselves in
- * knots trying to call through the high level vfs sync methods.
+ * Track an inode that could have dirty pages.  Used to kick off
+ * writeback on all dirty pages during transaction commit without tying
+ * ourselves in knots trying to call through the high level vfs sync
+ * methods.
+ *
+ * This is called by writers who hold the inode and transaction.  The
+ * inode's presence in the rbtree is removed by destroy_inode, prevented
+ * by the inode hold, and by committing the transaction, which is
+ * prevented by holding the transaction.  The inode can only go from
+ * empty to on the rbtree while we're here.
  */
 void scoutfs_inode_queue_writeback(struct inode *inode)
 {
 	DECLARE_INODE_SB_INFO(inode->i_sb, inf);
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);

-	spin_lock(&inf->writeback_lock);
-	if (RB_EMPTY_NODE(&si->writeback_node))
-		insert_writeback_inode(inf, si);
-	spin_unlock(&inf->writeback_lock);
+	if (RB_EMPTY_NODE(&si->writeback_node)) {
+		spin_lock(&inf->writeback_lock);
+		if (RB_EMPTY_NODE(&si->writeback_node))
+			insert_writeback_inode(inf, si);
+		spin_unlock(&inf->writeback_lock);
+	}
 }

 /*
@@ -4,7 +4,6 @@
|
||||
#include "key.h"
|
||||
#include "lock.h"
|
||||
#include "per_task.h"
|
||||
#include "count.h"
|
||||
#include "format.h"
|
||||
#include "data.h"
|
||||
|
||||
@@ -83,11 +82,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
|
||||
struct list_head *list, u64 ino,
|
||||
umode_t mode);
|
||||
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
|
||||
struct list_head *list, u64 seq,
|
||||
const struct scoutfs_item_count cnt);
|
||||
struct list_head *list, u64 seq);
|
||||
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
|
||||
bool set_data_seq,
|
||||
const struct scoutfs_item_count cnt);
|
||||
bool set_data_seq);
|
||||
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);
|
||||
|
||||
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
|
||||
|
||||
@@ -674,8 +674,7 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)

 	/* setting only so we don't see 0 data seq with nonzero data_version */
 	set_data_seq = sm.data_version != 0 ? true : false;
-	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq,
-					    SIC_SETATTR_MORE());
+	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq);
 	if (ret)
 		goto unlock;
@@ -1339,7 +1339,10 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
 		/* split needs multiple items, sparse may not have enough */
 		if (!left)
 			return -ENOMEM;

+		compact_page_items(sb, pg, left);
+		found = item_rbtree_walk(&pg->item_root, key, NULL, &par,
+					 &pnode);
 	}

 	item = alloc_item(pg, key, liv, val, val_len);
@@ -1491,6 +1494,8 @@ retry:
 			rbtree_erase(&rd->node, &root);
 			rbtree_insert(&rd->node, par, pnode, &cinf->pg_root);
 			lru_accessed(sb, cinf, rd);
+			trace_scoutfs_item_read_page(sb, key, &rd->start,
+						     &rd->end);
 			continue;
 		}

@@ -2342,6 +2347,8 @@ retry:
 		write_lock(&pg->rwlock);

 		pgi = trim_page_intersection(sb, cinf, pg, right, start, end);
+		trace_scoutfs_item_invalidate_page(sb, start, end,
+						   &pg->start, &pg->end, pgi);
 		BUG_ON(pgi == PGI_DISJOINT); /* walk wouldn't ret disjoint */

 		if (pgi == PGI_INSIDE) {
@@ -2364,9 +2371,9 @@ retry:
 			/* inv was entirely inside page, done after bisect */
 			write_trylock_will_succeed(&right->rwlock);
 			rbtree_insert(&right->node, par, pnode, &cinf->pg_root);
-			lru_accessed(sb, cinf, right);
 			write_unlock(&right->rwlock);
 			write_unlock(&pg->rwlock);
+			lru_accessed(sb, cinf, right);
 			right = NULL;
 			break;
 		}
@@ -2396,7 +2403,6 @@ static int item_lru_shrink(struct shrinker *shrink,
 	struct active_reader *active;
-	struct cached_page *tmp;
 	struct cached_page *pg;
-	LIST_HEAD(list);
 	int nr;

 	if (sc->nr_to_scan == 0)
@@ -2433,21 +2439,17 @@ static int item_lru_shrink(struct shrinker *shrink,

 		__lru_remove(sb, cinf, pg);
 		rbtree_erase(&pg->node, &cinf->pg_root);
-		list_move_tail(&pg->lru_head, &list);
+		invalidate_pcpu_page(pg);
 		write_unlock(&pg->rwlock);

+		put_pg(sb, pg);
+
 		if (--nr == 0)
 			break;
 	}

 	write_unlock(&cinf->rwlock);
 	spin_unlock(&cinf->lru_lock);

-	list_for_each_entry_safe(pg, tmp, &list, lru_head) {
-		list_del_init(&pg->lru_head);
-		put_pg(sb, pg);
-	}
 out:
 	return min_t(unsigned long, cinf->lru_pages, INT_MAX);
 }
@@ -65,7 +65,7 @@
  * relative to that lock state we resend.
  */

-#define GRACE_PERIOD_KT		ms_to_ktime(2)
+#define GRACE_PERIOD_KT		ms_to_ktime(10)

 /*
  * allocated per-super, freed on unmount.
@@ -770,16 +770,6 @@ static void lock_invalidate_worker(struct work_struct *work)
 	list_for_each_entry_safe(lock, tmp, &linfo->inv_list, inv_head) {
 		nl = &lock->inv_nl;

-		/* skip if grace hasn't elapsed, record earliest */
-		deadline = lock->grace_deadline;
-		if (ktime_before(now, deadline)) {
-			delay = min(delay,
-				    nsecs_to_jiffies(ktime_to_ns(
-						ktime_sub(deadline, now))));
-			scoutfs_inc_counter(linfo->sb, lock_grace_wait);
-			continue;
-		}
-
 		/* wait for reordered grant to finish */
 		if (lock->mode != nl->old_mode)
 			continue;
@@ -788,6 +778,15 @@ static void lock_invalidate_worker(struct work_struct *work)
 		if (!lock_counts_match(nl->new_mode, lock->users))
 			continue;

+		/* skip if grace hasn't elapsed, record earliest */
+		deadline = lock->grace_deadline;
+		if (!linfo->shutdown && ktime_before(now, deadline)) {
+			delay = min(delay,
+				    nsecs_to_jiffies(ktime_to_ns(
+						ktime_sub(deadline, now))));
+			scoutfs_inc_counter(linfo->sb, lock_grace_wait);
+			continue;
+		}
 		/* set the new mode, no incompatible users during inval */
 		lock->mode = nl->new_mode;
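Two related changes here: the grace period grows from 2ms to 10ms, and the grace test moves below the old-mode and user-count checks while gaining a !linfo->shutdown escape. Reading only this diff, the effect seems to be that the grace delay is no longer charged to locks that couldn't be invalidated yet anyway, and that a shutting-down mount no longer waits out grace periods at all; treat that as inference from the code movement, not a statement from the patch.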
@@ -28,7 +28,7 @@
 #include "super.h"

 static const match_table_t tokens = {
-	{Opt_server_addr, "server_addr=%s"},
+	{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
 	{Opt_metadev_path, "metadev_path=%s"},
 	{Opt_err, NULL}
 };
@@ -43,46 +43,6 @@ u32 scoutfs_option_u32(struct super_block *sb, int token)
 	return 0;
 }

-/* The caller's string is null terminted and can be clobbered */
-static int parse_ipv4(struct super_block *sb, char *str,
-		      struct sockaddr_in *sin)
-{
-	unsigned long port = 0;
-	__be32 addr;
-	char *c;
-	int ret;
-
-	/* null term port, if specified */
-	c = strchr(str, ':');
-	if (c)
-		*c = '\0';
-
-	/* parse addr */
-	addr = in_aton(str);
-	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
-	    ipv4_is_zeronet(addr) ||
-	    ipv4_is_local_multicast(addr)) {
-		scoutfs_err(sb, "invalid unicast ipv4 address: %s", str);
-		return -EINVAL;
-	}
-
-	/* parse port, if specified */
-	if (c) {
-		c++;
-		ret = kstrtoul(c, 0, &port);
-		if (ret != 0 || port == 0 || port >= U16_MAX) {
-			scoutfs_err(sb, "invalid port in ipv4 address: %s", c);
-			return -EINVAL;
-		}
-	}
-
-	sin->sin_family = AF_INET;
-	sin->sin_addr.s_addr = addr;
-	sin->sin_port = cpu_to_be16(port);
-
-	return 0;
-}
-
 static int parse_bdev_path(struct super_block *sb, substring_t *substr,
 			   char **bdev_path_ret)
 {
@@ -132,14 +92,15 @@ out:
 int scoutfs_parse_options(struct super_block *sb, char *options,
 			  struct mount_options *parsed)
 {
-	char ipstr[INET_ADDRSTRLEN + 1];
 	substring_t args[MAX_OPT_ARGS];
+	int nr;
 	int token;
 	char *p;
 	int ret;

+	/* Set defaults */
 	memset(parsed, 0, sizeof(*parsed));
+	parsed->quorum_slot_nr = -1;

 	while ((p = strsep(&options, ",")) != NULL) {
 		if (!*p)
@@ -147,12 +108,23 @@ int scoutfs_parse_options(struct super_block *sb, char *options,

 		token = match_token(p, tokens, args);
 		switch (token) {
-		case Opt_server_addr:
-			match_strlcpy(ipstr, args, ARRAY_SIZE(ipstr));
-			ret = parse_ipv4(sb, ipstr, &parsed->server_addr);
-			if (ret < 0)
+		case Opt_quorum_slot_nr:
+			if (parsed->quorum_slot_nr != -1) {
+				scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
 				return -EINVAL;
+			}
+			ret = match_int(args, &nr);
+			if (ret < 0 || nr < 0 ||
+			    nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
+				scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
+					    SCOUTFS_QUORUM_MAX_SLOTS - 1);
+				if (ret == 0)
+					ret = -EINVAL;
+				return ret;
+			}
+			parsed->quorum_slot_nr = nr;
 			break;
 		case Opt_metadev_path:
@@ -6,13 +6,13 @@
 #include "format.h"

 enum scoutfs_mount_options {
-	Opt_server_addr,
+	Opt_quorum_slot_nr,
 	Opt_metadev_path,
 	Opt_err,
 };

 struct mount_options {
-	struct sockaddr_in server_addr;
+	int quorum_slot_nr;
 	char *metadev_path;
 };
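Taken together with the format changes above, a quorum member now identifies itself by slot number instead of by server address, so a member's mount options presumably look something like quorum_slot_nr=0,metadev_path=/dev/vg/scoutfs-meta (the device path is illustrative; the slot number must fall in 0..SCOUTFS_QUORUM_MAX_SLOTS-1, i.e. 0..14, and the slot's address now comes from the quorum config stored in the super block rather than from a mount option).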
kmod/src/quorum.c: 1599 lines changed (diff suppressed because it is too large)
@@ -1,10 +1,15 @@
 #ifndef _SCOUTFS_QUORUM_H_
 #define _SCOUTFS_QUORUM_H_

-int scoutfs_quorum_election(struct super_block *sb, ktime_t timeout_abs,
-			    u64 prev_term, u64 *elected_term);
 void scoutfs_quorum_clear_leader(struct super_block *sb);
 int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
+void scoutfs_quorum_server_shutdown(struct super_block *sb);
+
+u8 scoutfs_quorum_votes_needed(struct super_block *sb);
+void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
+			     struct sockaddr_in *sin);
+
+int scoutfs_quorum_setup(struct super_block *sb);
+void scoutfs_quorum_shutdown(struct super_block *sb);
+void scoutfs_quorum_destroy(struct super_block *sb);

 #endif
@@ -31,7 +31,6 @@
 #include "lock.h"
 #include "super.h"
 #include "ioctl.h"
-#include "count.h"
 #include "export.h"
 #include "dir.h"
 #include "server.h"
@@ -424,135 +423,34 @@ TRACE_EVENT(scoutfs_trans_write_func,
 	TP_printk(SCSBF" dirty %lu", SCSB_TRACE_ARGS, __entry->dirty)
 );

-TRACE_EVENT(scoutfs_release_trans,
-	TP_PROTO(struct super_block *sb, void *rsv, unsigned int rsv_holders,
-		 struct scoutfs_item_count *res,
-		 struct scoutfs_item_count *act, unsigned int tri_holders,
-		 unsigned int tri_writing, unsigned int tri_items,
-		 unsigned int tri_vals),
+DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
+	TP_PROTO(struct super_block *sb, void *journal_info, int holders),

-	TP_ARGS(sb, rsv, rsv_holders, res, act, tri_holders, tri_writing,
-		tri_items, tri_vals),
+	TP_ARGS(sb, journal_info, holders),

 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
-		__field(void *, rsv)
-		__field(unsigned int, rsv_holders)
-		__field(int, res_items)
-		__field(int, res_vals)
-		__field(int, act_items)
-		__field(int, act_vals)
-		__field(unsigned int, tri_holders)
-		__field(unsigned int, tri_writing)
-		__field(unsigned int, tri_items)
-		__field(unsigned int, tri_vals)
+		__field(unsigned long, journal_info)
+		__field(int, holders)
 	),

 	TP_fast_assign(
 		SCSB_TRACE_ASSIGN(sb);
-		__entry->rsv = rsv;
-		__entry->rsv_holders = rsv_holders;
-		__entry->res_items = res->items;
-		__entry->res_vals = res->vals;
-		__entry->act_items = act->items;
-		__entry->act_vals = act->vals;
-		__entry->tri_holders = tri_holders;
-		__entry->tri_writing = tri_writing;
-		__entry->tri_items = tri_items;
-		__entry->tri_vals = tri_vals;
+		__entry->journal_info = (unsigned long)journal_info;
+		__entry->holders = holders;
 	),

-	TP_printk(SCSBF" rsv %p holders %u reserved %u.%u actual "
-		  "%d.%d, trans holders %u writing %u reserved "
-		  "%u.%u", SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
-		  __entry->res_items, __entry->res_vals, __entry->act_items,
-		  __entry->act_vals, __entry->tri_holders, __entry->tri_writing,
-		  __entry->tri_items, __entry->tri_vals)
+	TP_printk(SCSBF" journal_info 0x%0lx holders %d",
+		  SCSB_TRACE_ARGS, __entry->journal_info, __entry->holders)
 );

-TRACE_EVENT(scoutfs_trans_acquired_hold,
-	TP_PROTO(struct super_block *sb, const struct scoutfs_item_count *cnt,
-		 void *rsv, unsigned int rsv_holders,
-		 struct scoutfs_item_count *res,
-		 struct scoutfs_item_count *act, unsigned int tri_holders,
-		 unsigned int tri_writing, unsigned int tri_items,
-		 unsigned int tri_vals),
-
-	TP_ARGS(sb, cnt, rsv, rsv_holders, res, act, tri_holders, tri_writing,
-		tri_items, tri_vals),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, cnt_items)
-		__field(int, cnt_vals)
-		__field(void *, rsv)
-		__field(unsigned int, rsv_holders)
-		__field(int, res_items)
-		__field(int, res_vals)
-		__field(int, act_items)
-		__field(int, act_vals)
-		__field(unsigned int, tri_holders)
-		__field(unsigned int, tri_writing)
-		__field(unsigned int, tri_items)
-		__field(unsigned int, tri_vals)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->cnt_items = cnt->items;
-		__entry->cnt_vals = cnt->vals;
-		__entry->rsv = rsv;
-		__entry->rsv_holders = rsv_holders;
-		__entry->res_items = res->items;
-		__entry->res_vals = res->vals;
-		__entry->act_items = act->items;
-		__entry->act_vals = act->vals;
-		__entry->tri_holders = tri_holders;
-		__entry->tri_writing = tri_writing;
-		__entry->tri_items = tri_items;
-		__entry->tri_vals = tri_vals;
-	),
-
-	TP_printk(SCSBF" cnt %u.%u, rsv %p holders %u reserved %u.%u "
-		  "actual %d.%d, trans holders %u writing %u reserved "
-		  "%u.%u", SCSB_TRACE_ARGS, __entry->cnt_items,
-		  __entry->cnt_vals, __entry->rsv, __entry->rsv_holders,
-		  __entry->res_items, __entry->res_vals, __entry->act_items,
-		  __entry->act_vals, __entry->tri_holders, __entry->tri_writing,
-		  __entry->tri_items, __entry->tri_vals)
+DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_trans_acquired_hold,
+	TP_PROTO(struct super_block *sb, void *journal_info, int holders),
+	TP_ARGS(sb, journal_info, holders)
 );

-TRACE_EVENT(scoutfs_trans_track_item,
-	TP_PROTO(struct super_block *sb, int delta_items, int delta_vals,
-		 int act_items, int act_vals, int res_items, int res_vals),
-
-	TP_ARGS(sb, delta_items, delta_vals, act_items, act_vals, res_items,
-		res_vals),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, delta_items)
-		__field(int, delta_vals)
-		__field(int, act_items)
-		__field(int, act_vals)
-		__field(int, res_items)
-		__field(int, res_vals)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->delta_items = delta_items;
-		__entry->delta_vals = delta_vals;
-		__entry->act_items = act_items;
-		__entry->act_vals = act_vals;
-		__entry->res_items = res_items;
-		__entry->res_vals = res_vals;
-	),
-
-	TP_printk(SCSBF" delta_items %d delta_vals %d act_items %d act_vals %d res_items %d res_vals %d",
-		  SCSB_TRACE_ARGS, __entry->delta_items, __entry->delta_vals,
-		  __entry->act_items, __entry->act_vals, __entry->res_items,
-		  __entry->res_vals)
+DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_release_trans,
+	TP_PROTO(struct super_block *sb, void *journal_info, int holders),
+	TP_ARGS(sb, journal_info, holders)
 );
 TRACE_EVENT(scoutfs_ioc_release,
@@ -1686,7 +1584,7 @@ TRACE_EVENT(scoutfs_get_name,
 );

 TRACE_EVENT(scoutfs_btree_read_error,
-	TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *ref),
+	TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *ref),

 	TP_ARGS(sb, ref),

@@ -1706,37 +1604,10 @@ TRACE_EVENT(scoutfs_btree_read_error,
 		  SCSB_TRACE_ARGS, __entry->blkno, __entry->seq)
 );

-TRACE_EVENT(scoutfs_btree_dirty_block,
-	TP_PROTO(struct super_block *sb, u64 blkno, u64 seq,
-		 u64 bt_blkno, u64 bt_seq),
-
-	TP_ARGS(sb, blkno, seq, bt_blkno, bt_seq),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(__u64, blkno)
-		__field(__u64, seq)
-		__field(__u64, bt_blkno)
-		__field(__u64, bt_seq)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->blkno = blkno;
-		__entry->seq = seq;
-		__entry->bt_blkno = bt_blkno;
-		__entry->bt_seq = bt_seq;
-	),
-
-	TP_printk(SCSBF" blkno %llu seq %llu bt_blkno %llu bt_seq %llu",
-		  SCSB_TRACE_ARGS, __entry->blkno, __entry->seq,
-		  __entry->bt_blkno, __entry->bt_seq)
-);
-
 TRACE_EVENT(scoutfs_btree_walk,
 	TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
 		 struct scoutfs_key *key, int flags, int level,
-		 struct scoutfs_btree_ref *ref),
+		 struct scoutfs_block_ref *ref),

 	TP_ARGS(sb, root, key, flags, level, ref),
@@ -1872,118 +1743,69 @@ TRACE_EVENT(scoutfs_lock_message,
 		  __entry->old_mode, __entry->new_mode)
 );

+DECLARE_EVENT_CLASS(scoutfs_quorum_message_class,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
-TRACE_EVENT(scoutfs_quorum_election,
-	TP_PROTO(struct super_block *sb, u64 prev_term),

-	TP_ARGS(sb, prev_term),
+	TP_ARGS(sb, term, type, nr),

 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
-		__field(__u64, prev_term)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->prev_term = prev_term;
-	),
-
-	TP_printk(SCSBF" prev_term %llu",
-		  SCSB_TRACE_ARGS, __entry->prev_term)
-);
-
-TRACE_EVENT(scoutfs_quorum_election_ret,
-	TP_PROTO(struct super_block *sb, int ret, u64 elected_term),
-
-	TP_ARGS(sb, ret, elected_term),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, ret)
-		__field(__u64, elected_term)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->ret = ret;
-		__entry->elected_term = elected_term;
-	),
-
-	TP_printk(SCSBF" ret %d elected_term %llu",
-		  SCSB_TRACE_ARGS, __entry->ret, __entry->elected_term)
-);
-
-TRACE_EVENT(scoutfs_quorum_election_vote,
-	TP_PROTO(struct super_block *sb, int role, u64 term, u64 vote_for_rid,
-		 int votes, int log_cycles, int quorum_count),
-
-	TP_ARGS(sb, role, term, vote_for_rid, votes, log_cycles, quorum_count),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, role)
 		__field(__u64, term)
-		__field(__u64, vote_for_rid)
-		__field(int, votes)
-		__field(int, log_cycles)
-		__field(int, quorum_count)
+		__field(__u8, type)
+		__field(int, nr)
 	),

 	TP_fast_assign(
 		SCSB_TRACE_ASSIGN(sb);
-		__entry->role = role;
 		__entry->term = term;
-		__entry->vote_for_rid = vote_for_rid;
-		__entry->votes = votes;
-		__entry->log_cycles = log_cycles;
-		__entry->quorum_count = quorum_count;
+		__entry->type = type;
+		__entry->nr = nr;
 	),

-	TP_printk(SCSBF" role %d term %llu vote_for_rid %016llx votes %d log_cycles %d quorum_count %d",
-		  SCSB_TRACE_ARGS, __entry->role, __entry->term,
-		  __entry->vote_for_rid, __entry->votes, __entry->log_cycles,
-		  __entry->quorum_count)
+	TP_printk(SCSBF" term %llu type %u nr %d",
+		  SCSB_TRACE_ARGS, __entry->term, __entry->type, __entry->nr)
 );
+DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_send_message,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
+	TP_ARGS(sb, term, type, nr)
+);
+DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_recv_message,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
+	TP_ARGS(sb, term, type, nr)
+);

-DECLARE_EVENT_CLASS(scoutfs_quorum_block_class,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
+TRACE_EVENT(scoutfs_quorum_loop,
+	TP_PROTO(struct super_block *sb, int role, u64 term, int vote_for,
+		 unsigned long vote_bits, struct timespec64 timeout),

-	TP_ARGS(sb, blk),
+	TP_ARGS(sb, role, term, vote_for, vote_bits, timeout),

 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
-		__field(__u64, blkno)
 		__field(__u64, term)
-		__field(__u64, write_nr)
-		__field(__u64, voter_rid)
-		__field(__u64, vote_for_rid)
-		__field(__u32, crc)
-		__field(__u8, log_nr)
+		__field(int, role)
+		__field(int, vote_for)
+		__field(unsigned long, vote_bits)
+		__field(unsigned long, vote_count)
+		__field(unsigned long long, timeout_sec)
+		__field(int, timeout_nsec)
 	),

 	TP_fast_assign(
 		SCSB_TRACE_ASSIGN(sb);
-		__entry->blkno = le64_to_cpu(blk->blkno);
-		__entry->term = le64_to_cpu(blk->term);
-		__entry->write_nr = le64_to_cpu(blk->write_nr);
-		__entry->voter_rid = le64_to_cpu(blk->voter_rid);
-		__entry->vote_for_rid = le64_to_cpu(blk->vote_for_rid);
-		__entry->crc = le32_to_cpu(blk->crc);
-		__entry->log_nr = blk->log_nr;
+		__entry->term = term;
+		__entry->role = role;
+		__entry->vote_for = vote_for;
+		__entry->vote_bits = vote_bits;
+		__entry->vote_count = hweight_long(vote_bits);
+		__entry->timeout_sec = timeout.tv_sec;
+		__entry->timeout_nsec = timeout.tv_nsec;
 	),

-	TP_printk(SCSBF" blkno %llu term %llu write_nr %llu voter_rid %016llx vote_for_rid %016llx crc 0x%08x log_nr %u",
-		  SCSB_TRACE_ARGS, __entry->blkno, __entry->term,
-		  __entry->write_nr, __entry->voter_rid, __entry->vote_for_rid,
-		  __entry->crc, __entry->log_nr)
-);
-DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_read_block,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
-	TP_ARGS(sb, blk)
-);
-DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_write_block,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
-	TP_ARGS(sb, blk)
+	TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu.%u",
+		  SCSB_TRACE_ARGS, __entry->term, __entry->role,
+		  __entry->vote_for, __entry->vote_bits, __entry->vote_count,
+		  __entry->timeout_sec, __entry->timeout_nsec)
 );
 /*
@@ -2013,31 +1835,27 @@ DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_recv_clock_sync,
 );

 TRACE_EVENT(scoutfs_trans_seq_advance,
-	TP_PROTO(struct super_block *sb, u64 rid, u64 prev_seq,
-		 u64 next_seq),
+	TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),

-	TP_ARGS(sb, rid, prev_seq, next_seq),
+	TP_ARGS(sb, rid, trans_seq),

 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
 		__field(__u64, s_rid)
-		__field(__u64, prev_seq)
-		__field(__u64, next_seq)
+		__field(__u64, trans_seq)
 	),

 	TP_fast_assign(
 		SCSB_TRACE_ASSIGN(sb);
 		__entry->s_rid = rid;
-		__entry->prev_seq = prev_seq;
-		__entry->next_seq = next_seq;
+		__entry->trans_seq = trans_seq;
 	),

-	TP_printk(SCSBF" rid %016llx prev_seq %llu next_seq %llu",
-		  SCSB_TRACE_ARGS, __entry->s_rid, __entry->prev_seq,
-		  __entry->next_seq)
+	TP_printk(SCSBF" rid %016llx trans_seq %llu\n",
+		  SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
 );

-TRACE_EVENT(scoutfs_trans_seq_farewell,
+TRACE_EVENT(scoutfs_trans_seq_remove,
 	TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),

 	TP_ARGS(sb, rid, trans_seq),
@@ -2117,8 +1935,8 @@ DEFINE_EVENT(scoutfs_forest_bloom_class, scoutfs_forest_bloom_search,
 );

 TRACE_EVENT(scoutfs_forest_prepare_commit,
-	TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *item_ref,
-		 struct scoutfs_btree_ref *bloom_ref),
+	TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *item_ref,
+		 struct scoutfs_block_ref *bloom_ref),
 	TP_ARGS(sb, item_ref, bloom_ref),
 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
@@ -2184,18 +2002,45 @@ TRACE_EVENT(scoutfs_forest_init_our_log,
 		  __entry->blkno, __entry->seq)
 );

+TRACE_EVENT(scoutfs_block_dirty_ref,
+	TP_PROTO(struct super_block *sb, u64 ref_blkno, u64 ref_seq,
+		 u64 block_blkno, u64 block_seq),
+
+	TP_ARGS(sb, ref_blkno, ref_seq, block_blkno, block_seq),
+
+	TP_STRUCT__entry(
+		SCSB_TRACE_FIELDS
+		__field(__u64, ref_blkno)
+		__field(__u64, ref_seq)
+		__field(__u64, block_blkno)
+		__field(__u64, block_seq)
+	),
+
+	TP_fast_assign(
+		SCSB_TRACE_ASSIGN(sb);
+		__entry->ref_blkno = ref_blkno;
+		__entry->ref_seq = ref_seq;
+		__entry->block_blkno = block_blkno;
+		__entry->block_seq = block_seq;
+	),
+
+	TP_printk(SCSBF" ref_blkno %llu ref_seq %llu block_blkno %llu block_seq %llu",
+		  SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq,
+		  __entry->block_blkno, __entry->block_seq)
+);
+
 DECLARE_EVENT_CLASS(scoutfs_block_class,
-	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved),
+	TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
+		 unsigned long bits, __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed),
 	TP_STRUCT__entry(
 		SCSB_TRACE_FIELDS
 		__field(void *, bp)
 		__field(__u64, blkno)
 		__field(int, refcount)
 		__field(int, io_count)
-		__field(unsigned long, bits)
-		__field(__u64, lru_moved)
+		__field(long, bits)
+		__field(__u64, accessed)
 	),
 	TP_fast_assign(
 		SCSB_TRACE_ASSIGN(sb);
@@ -2204,57 +2049,71 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
 		__entry->refcount = refcount;
 		__entry->io_count = io_count;
 		__entry->bits = bits;
-		__entry->lru_moved = lru_moved;
+		__entry->accessed = accessed;
 	),
-	TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx lru_moved %llu",
-		  SCSB_TRACE_ARGS, __entry->bp, __entry->blkno,
-		  __entry->refcount, __entry->io_count, __entry->bits,
-		  __entry->lru_moved)
+	TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx accessed %llu",
+		  SCSB_TRACE_ARGS, __entry->bp, __entry->blkno, __entry->refcount,
+		  __entry->io_count, __entry->bits, __entry->accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_allocate,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_free,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_insert,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
+DEFINE_EVENT(scoutfs_block_class, scoutfs_block_remove,
+	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
+);
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_end_io,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_submit,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_invalidate,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_mark_dirty,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_forget,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );
 DEFINE_EVENT(scoutfs_block_class, scoutfs_block_shrink,
 	TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
-		 int refcount, int io_count, unsigned long bits, u64 lru_moved),
-	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
+		 int refcount, int io_count, unsigned long bits,
+		 __u64 accessed),
+	TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
 );

 DECLARE_EVENT_CLASS(scoutfs_ext_next_class,
@@ -2496,6 +2355,53 @@ TRACE_EVENT(scoutfs_alloc_move,
 		  __entry->ret)
 );

+TRACE_EVENT(scoutfs_item_read_page,
+	TP_PROTO(struct super_block *sb, struct scoutfs_key *key,
+		 struct scoutfs_key *pg_start, struct scoutfs_key *pg_end),
+	TP_ARGS(sb, key, pg_start, pg_end),
+	TP_STRUCT__entry(
+		SCSB_TRACE_FIELDS
+		sk_trace_define(key)
+		sk_trace_define(pg_start)
+		sk_trace_define(pg_end)
+	),
+	TP_fast_assign(
+		SCSB_TRACE_ASSIGN(sb);
+		sk_trace_assign(key, key);
+		sk_trace_assign(pg_start, pg_start);
+		sk_trace_assign(pg_end, pg_end);
+	),
+	TP_printk(SCSBF" key "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT,
+		  SCSB_TRACE_ARGS, sk_trace_args(key), sk_trace_args(pg_start),
+		  sk_trace_args(pg_end))
+);
+
+TRACE_EVENT(scoutfs_item_invalidate_page,
+	TP_PROTO(struct super_block *sb, struct scoutfs_key *start,
+		 struct scoutfs_key *end, struct scoutfs_key *pg_start,
+		 struct scoutfs_key *pg_end, int pgi),
+	TP_ARGS(sb, start, end, pg_start, pg_end, pgi),
+	TP_STRUCT__entry(
+		SCSB_TRACE_FIELDS
+		sk_trace_define(start)
+		sk_trace_define(end)
+		sk_trace_define(pg_start)
+		sk_trace_define(pg_end)
+		__field(int, pgi)
+	),
+	TP_fast_assign(
+		SCSB_TRACE_ASSIGN(sb);
+		sk_trace_assign(start, start);
+		sk_trace_assign(end, end);
+		sk_trace_assign(pg_start, pg_start);
+		sk_trace_assign(pg_end, pg_end);
+		__entry->pgi = pgi;
+	),
+	TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT" pgi %d",
+		  SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
+		  sk_trace_args(pg_start), sk_trace_args(pg_end), __entry->pgi)
+);
+
 #endif /* _TRACE_SCOUTFS_H */

 /* This part must be outside protection */
@@ -59,7 +59,6 @@ struct server_info {
|
||||
int err;
|
||||
bool shutting_down;
|
||||
struct completion start_comp;
|
||||
struct sockaddr_in listen_sin;
|
||||
u64 term;
|
||||
struct scoutfs_net_connection *conn;
|
||||
|
||||
@@ -75,7 +74,7 @@ struct server_info {
|
||||
unsigned long nr_clients;
|
||||
|
||||
/* track clients waiting in unmmount for farewell response */
|
||||
struct mutex farewell_mutex;
|
||||
spinlock_t farewell_lock;
|
||||
struct list_head farewell_requests;
|
||||
struct work_struct farewell_work;
|
||||
|
||||
@@ -92,6 +91,7 @@ struct server_info {
|
||||
|
||||
struct mutex logs_mutex;
|
||||
struct mutex srch_mutex;
|
||||
struct mutex mounted_clients_mutex;
|
||||
|
||||
/* stable versions stored from commits, given in locks and rpcs */
|
||||
seqcount_t roots_seqcount;
|
||||
@@ -649,79 +649,10 @@ static void init_trans_seq_key(struct scoutfs_key *key, u64 seq, u64 rid)
}

/*
 * Give the client the next sequence number for their transaction.  They
 * provide their previous transaction sequence number that they've
 * committed.
 *
 * We track the sequence numbers of transactions that clients have open.
 * This limits the transaction sequence numbers that can be returned in
 * the index of inodes by meta and data transaction numbers.  We
 * communicate the largest possible sequence number to clients via an
 * rpc.
 *
 * The transaction sequence tracking is stored in a btree so it is
 * shared across servers.  Final entries are removed when processing a
 * client's farewell or when it's removed.
 * Remove all trans_seq items owned by the client rid, the caller holds
 * the seq_rwsem.
 */
static int server_advance_seq(struct super_block *sb,
			      struct scoutfs_net_connection *conn,
			      u8 cmd, u64 id, void *arg, u16 arg_len)
{
	DECLARE_SERVER_INFO(sb, server);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	__le64 their_seq;
	__le64 next_seq;
	u64 rid = scoutfs_net_client_rid(conn);
	struct scoutfs_key key;
	int ret;

	if (arg_len != sizeof(__le64)) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(&their_seq, arg, sizeof(their_seq));

	ret = scoutfs_server_hold_commit(sb);
	if (ret)
		goto out;

	down_write(&server->seq_rwsem);

	if (their_seq != 0) {
		init_trans_seq_key(&key, le64_to_cpu(their_seq), rid);
		ret = scoutfs_btree_delete(sb, &server->alloc, &server->wri,
					   &super->trans_seqs, &key);
		if (ret < 0 && ret != -ENOENT)
			goto unlock;
	}

	next_seq = super->next_trans_seq;
	le64_add_cpu(&super->next_trans_seq, 1);

	trace_scoutfs_trans_seq_advance(sb, rid, le64_to_cpu(their_seq),
					le64_to_cpu(next_seq));

	init_trans_seq_key(&key, le64_to_cpu(next_seq), rid);
	ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
				   &super->trans_seqs, &key, NULL, 0);
unlock:
	up_write(&server->seq_rwsem);
	ret = scoutfs_server_apply_commit(sb, ret);

out:
	return scoutfs_net_response(sb, conn, cmd, id, ret,
				    &next_seq, sizeof(next_seq));
}

/*
 * Remove any transaction sequences owned by the client.  They must have
 * committed any final transaction by the time they get here via sending
 * their farewell message.  This can be called multiple times as the
 * client's farewell is retransmitted so it's OK to not find any
 * entries.  This is called with the server commit rwsem held.
 */
static int remove_trans_seq(struct super_block *sb, u64 rid)
static int remove_trans_seq_locked(struct super_block *sb, u64 rid)
{
	DECLARE_SERVER_INFO(sb, server);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -730,8 +661,6 @@ static int remove_trans_seq(struct super_block *sb, u64 rid)
	struct scoutfs_key key;
	int ret = 0;

	down_write(&server->seq_rwsem);

	init_trans_seq_key(&key, 0, 0);

	for (;;) {
@@ -746,17 +675,102 @@ static int remove_trans_seq(struct super_block *sb, u64 rid)
		scoutfs_btree_put_iref(&iref);

		if (le64_to_cpu(key.skts_rid) == rid) {
			trace_scoutfs_trans_seq_farewell(sb, rid,
			trace_scoutfs_trans_seq_remove(sb, rid,
					le64_to_cpu(key.skts_trans_seq));
			ret = scoutfs_btree_delete(sb, &server->alloc,
						   &server->wri,
						   &super->trans_seqs, &key);
			break;
			if (ret < 0)
				break;
		}

		scoutfs_key_inc(&key);
	}

	return ret;
}

/*
 * Give the client the next sequence number for the transaction that
 * they're opening.
 *
 * We track the sequence numbers of transactions that clients have open.
 * This limits the transaction sequence numbers that can be returned in
 * the index of inodes by meta and data transaction numbers.  We
 * communicate the largest possible sequence number to clients via an
 * rpc.
 *
 * The transaction sequence tracking is stored in a btree so it is
 * shared across servers.  Final entries are removed when processing a
 * client's farewell or when it's removed.  We can be processing a
 * resent request that was committed by a previous server before the
 * reply was lost.  At this point the client has no transactions open
 * and may or may not have just finished one.  To keep it simple we
 * always remove any previous seq items, if there are any, and then
 * insert a new item for the client at the next greatest seq.
 */
static int server_advance_seq(struct super_block *sb,
			      struct scoutfs_net_connection *conn,
			      u8 cmd, u64 id, void *arg, u16 arg_len)
{
	DECLARE_SERVER_INFO(sb, server);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	u64 rid = scoutfs_net_client_rid(conn);
	struct scoutfs_key key;
	__le64 leseq = 0;
	u64 seq;
	int ret;

	if (arg_len != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = scoutfs_server_hold_commit(sb);
	if (ret)
		goto out;

	down_write(&server->seq_rwsem);

	ret = remove_trans_seq_locked(sb, rid);
	if (ret < 0)
		goto unlock;

	seq = le64_to_cpu(super->next_trans_seq);
	le64_add_cpu(&super->next_trans_seq, 1);

	trace_scoutfs_trans_seq_advance(sb, rid, seq);

	init_trans_seq_key(&key, seq, rid);
	ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
				   &super->trans_seqs, &key, NULL, 0);
	if (ret == 0)
		leseq = cpu_to_le64(seq);
unlock:
	up_write(&server->seq_rwsem);
	ret = scoutfs_server_apply_commit(sb, ret);

out:
	return scoutfs_net_response(sb, conn, cmd, id, ret,
				    &leseq, sizeof(leseq));
}

/*
 * Remove any transaction sequences owned by the client who's sent a
 * farewell.  They must have committed any final transaction by the time
 * they get here via sending their farewell message.  This can be called
 * multiple times as the client's farewell is retransmitted so it's OK
 * to not find any entries.  This is called with the server commit rwsem
 * held.
 */
static int remove_trans_seq(struct super_block *sb, u64 rid)
{
	DECLARE_SERVER_INFO(sb, server);
	int ret = 0;

	down_write(&server->seq_rwsem);
	ret = remove_trans_seq_locked(sb, rid);
	up_write(&server->seq_rwsem);

	return ret;
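The scans above walk trans_seq items in seq-major order, so the first item found for a rid is its oldest open transaction. A sketch of what init_trans_seq_key() likely fills in, assuming the skts_* key fields used in the loop (the real definition is not shown in this hunk):

	static void init_trans_seq_key(struct scoutfs_key *key, u64 seq, u64 rid)
	{
		memset(key, 0, sizeof(*key));			/* assumed zeroing */
		key->sk_zone = SCOUTFS_TRANS_SEQ_ZONE;		/* assumed zone constant */
		key->skts_trans_seq = cpu_to_le64(seq);		/* major: seq */
		key->skts_rid = cpu_to_le64(rid);		/* minor: rid */
	}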
@@ -1017,15 +1031,20 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_mounted_client_btree_val mcv;
	struct scoutfs_key key;
	int ret;

	init_mounted_client_key(&key, rid);
	mcv.flags = 0;
	if (gr_flags & SCOUTFS_NET_GREETING_FLAG_VOTER)
		mcv.flags |= SCOUTFS_MOUNTED_CLIENT_VOTER;
	if (gr_flags & SCOUTFS_NET_GREETING_FLAG_QUORUM)
		mcv.flags |= SCOUTFS_MOUNTED_CLIENT_QUORUM;

	return scoutfs_btree_insert(sb, &server->alloc, &server->wri,
				    &super->mounted_clients, &key, &mcv,
				    sizeof(mcv));
	mutex_lock(&server->mounted_clients_mutex);
	ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
				   &super->mounted_clients, &key, &mcv,
				   sizeof(mcv));
	mutex_unlock(&server->mounted_clients_mutex);

	return ret;
}

/*
@@ -1033,9 +1052,6 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
 * removed if we're processing a farewell on behalf of a client that
 * already had a previous server process its farewell.
 *
 * When we remove the last mounted client that's voting we write a new
 * quorum block with the updated unmount_barrier.
 *
 * The caller has to serialize with farewell processing.
 */
static int delete_mounted_client(struct super_block *sb, u64 rid)
@@ -1047,8 +1063,10 @@ static int delete_mounted_client(struct super_block *sb, u64 rid)

	init_mounted_client_key(&key, rid);

	mutex_lock(&server->mounted_clients_mutex);
	ret = scoutfs_btree_delete(sb, &server->alloc, &server->wri,
				   &super->mounted_clients, &key);
	mutex_unlock(&server->mounted_clients_mutex);
	if (ret == -ENOENT)
		ret = 0;

@@ -1096,6 +1114,20 @@ static int cancel_srch_compact(struct super_block *sb, u64 rid)
	return ret;
}

/*
 * Farewell processing is async to the request processing work.  Shutdown
 * waits for request processing to finish and then tears down the connection.
 * We don't want to queue farewell processing once we start shutting down
 * so that we don't have farewell processing racing with the connection
 * being shut down.  If a mount's farewell message is dropped by a server
 * it will be processed by the next server.
 */
static void queue_farewell_work(struct server_info *server)
{
	if (!server->shutting_down)
		queue_work(server->wq, &server->farewell_work);
}

/*
 * Process an incoming greeting request in the server from the client.
 * We try to send responses to failed greetings so that the sender can
@@ -1121,7 +1153,6 @@ static int server_greeting(struct super_block *sb,
	struct scoutfs_net_greeting *gr = arg;
	struct scoutfs_net_greeting greet;
	DECLARE_SERVER_INFO(sb, server);
	__le64 umb = 0;
	bool reconnecting;
	bool first_contact;
	bool farewell;
@@ -1141,10 +1172,10 @@ static int server_greeting(struct super_block *sb,
		goto send_err;
	}

	if (gr->format_hash != super->format_hash) {
	if (gr->version != super->version) {
		scoutfs_warn(sb, "client sent format 0x%llx, server has 0x%llx",
			     le64_to_cpu(gr->format_hash),
			     le64_to_cpu(super->format_hash));
			     le64_to_cpu(gr->version),
			     le64_to_cpu(super->version));
		ret = -EINVAL;
		goto send_err;
	}
@@ -1154,28 +1185,19 @@ static int server_greeting(struct super_block *sb,
		if (ret < 0)
			goto send_err;

		spin_lock(&server->lock);
		umb = super->unmount_barrier;
		spin_unlock(&server->lock);

		mutex_lock(&server->farewell_mutex);
		ret = insert_mounted_client(sb, le64_to_cpu(gr->rid),
					    le64_to_cpu(gr->flags));
		mutex_unlock(&server->farewell_mutex);

		ret = scoutfs_server_apply_commit(sb, ret);
		queue_work(server->wq, &server->farewell_work);
	} else {
		umb = gr->unmount_barrier;
	}

send_err:
	err = ret;

	greet.fsid = super->hdr.fsid;
	greet.format_hash = super->format_hash;
	greet.version = super->version;
	greet.server_term = cpu_to_le64(server->term);
	greet.unmount_barrier = umb;
	greet.rid = gr->rid;
	greet.flags = 0;

@@ -1231,19 +1253,17 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)

/*
 * This work processes farewell requests asynchronously.  Requests from
 * voting clients can be held until only the final quorum remains and
 * quorum members can be held until only the final majority remains and
 * they've all sent farewell requests.
 *
 * When we remove the last mounted client record for the last voting
 * client then we increase the unmount_barrier and write it to the super
 * block.  If voting clients don't get their farewell response they'll
 * see the greater umount_barrier in the super and will know that their
 * farewell has been processed and that they can exit.
 * A client can be disconnected before receiving our farewell response.
 * Before reconnecting they check for their mounted client item; if it's
 * been removed then they know that their farewell has been processed
 * and that they can finish unmounting without reconnecting.
 *
 * Responses that are waiting for clients who aren't voting are
 * immediately sent.  Clients that don't have a mounted client record
 * have already had their farewell processed by another server and can
 * proceed.
 * Responses for clients who aren't quorum members are immediately sent.
 * Clients that don't have a mounted client record have already had
 * their farewell processed by another server and can proceed.
 *
 * Farewell responses are unique in that sending them causes the server
 * to shutdown the connection to the client next time the socket
@@ -1265,56 +1285,26 @@ static void farewell_worker(struct work_struct *work)
	struct farewell_request *tmp;
	struct farewell_request *fw;
	SCOUTFS_BTREE_ITEM_REF(iref);
	unsigned int nr_unmounting = 0;
	unsigned int nr_mounted = 0;
	unsigned int quo_reqs = 0;
	unsigned int quo_mnts = 0;
	unsigned int non_mnts = 0;
	struct scoutfs_key key;
	LIST_HEAD(reqs);
	LIST_HEAD(send);
	bool deleted = false;
	bool voting;
	bool more_reqs;
	int ret;

	/* grab all the requests that are waiting */
	mutex_lock(&server->farewell_mutex);
	spin_lock(&server->farewell_lock);
	list_splice_init(&server->farewell_requests, &reqs);
	mutex_unlock(&server->farewell_mutex);
	spin_unlock(&server->farewell_lock);

	/* count how many requests are from voting clients */
	nr_unmounting = 0;
	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
		init_mounted_client_key(&key, fw->rid);
		ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
					   &iref);
		if (ret == 0 && invalid_mounted_client_item(&iref)) {
			scoutfs_btree_put_iref(&iref);
			ret = -EIO;
		}
		if (ret < 0) {
			if (ret == -ENOENT) {
				list_move_tail(&fw->entry, &send);
				continue;
			}
			goto out;
		}

		mcv = iref.val;
		voting = (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER) != 0;
		scoutfs_btree_put_iref(&iref);

		if (!voting) {
			list_move_tail(&fw->entry, &send);
			continue;
		}

		nr_unmounting++;
	}

	/* see how many mounted clients could vote for quorum */
	/* first count mounted clients who could send requests */
	init_mounted_client_key(&key, 0);
	for (;;) {
		mutex_lock(&server->mounted_clients_mutex);
		ret = scoutfs_btree_next(sb, &super->mounted_clients, &key,
					 &iref);
		mutex_unlock(&server->mounted_clients_mutex);
		if (ret == 0 && invalid_mounted_client_item(&iref)) {
			scoutfs_btree_put_iref(&iref);
			ret = -EIO;
@@ -1328,23 +1318,62 @@ static void farewell_worker(struct work_struct *work)
		key = *iref.key;
		mcv = iref.val;

		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER)
			nr_mounted++;
		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM)
			quo_mnts++;
		else
			non_mnts++;

		scoutfs_btree_put_iref(&iref);
		scoutfs_key_inc(&key);
	}

	/* send as many responses as we can to maintain quorum */
	while ((fw = list_first_entry_or_null(&reqs, struct farewell_request,
					      entry)) &&
	       (nr_mounted > super->quorum_count ||
		nr_unmounting >= nr_mounted)) {
	/* walk requests, checking their mounted client items */
	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
		init_mounted_client_key(&key, fw->rid);
		mutex_lock(&server->mounted_clients_mutex);
		ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
					   &iref);
		mutex_unlock(&server->mounted_clients_mutex);
		if (ret == 0 && invalid_mounted_client_item(&iref)) {
			scoutfs_btree_put_iref(&iref);
			ret = -EIO;
		}
		if (ret < 0) {
			/* missing items means we've already processed */
			if (ret == -ENOENT) {
				list_move(&fw->entry, &send);
				continue;
			}
			goto out;
		}

		list_move_tail(&fw->entry, &send);
		nr_mounted--;
		nr_unmounting--;
		deleted = true;
		mcv = iref.val;

		/* count quo reqs, can always send to non-quo clients */
		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM) {
			quo_reqs++;
		} else {
			list_move(&fw->entry, &send);
			non_mnts--;
		}

		scoutfs_btree_put_iref(&iref);
	}

	/*
	 * Only requests from quorum members remain and we've counted
	 * them and remaining mounts.  Send responses as long as enough
	 * quorum clients remain for a majority, or all the requests are
	 * from the final majority of quorum clients and they're the only
	 * mounted clients.
	 */
	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
		if ((quo_mnts > scoutfs_quorum_votes_needed(sb)) ||
		    ((quo_reqs == quo_mnts) && (non_mnts == 0))) {
			list_move_tail(&fw->entry, &send);
			quo_mnts--;
			quo_reqs--;
		}
	}

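To make the predicate above concrete, suppose scoutfs_quorum_votes_needed() returns 2 (the numbers here are illustrative, not from the source):

	/*
	 * quo_mnts = 3, quo_reqs = 1: 3 > 2, the lone quorum request is
	 * answered and quo_mnts drops to 2.
	 * quo_mnts = 2, quo_reqs = 2, non_mnts = 0: the majority test fails
	 * but the final-majority test passes, so the last members are
	 * released together and the cluster winds down.
	 * quo_mnts = 2, quo_reqs = 1, non_mnts > 0: neither test passes; the
	 * request is held until the non-quorum mounts have unmounted.
	 */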
	/* process and send farewell responses */
@@ -1353,24 +1382,12 @@ static void farewell_worker(struct work_struct *work)
		if (ret)
			goto out;

		/* delete mounted client last, client reconnect looks for it */
		ret = scoutfs_lock_server_farewell(sb, fw->rid) ?:
		      remove_trans_seq(sb, fw->rid) ?:
		      reclaim_log_trees(sb, fw->rid) ?:
		      delete_mounted_client(sb, fw->rid) ?:
		      cancel_srch_compact(sb, fw->rid);

		ret = scoutfs_server_apply_commit(sb, ret);
		if (ret)
			goto out;
	}

	/* update the unmount barrier if we deleted all voting clients */
	if (deleted && nr_mounted == 0) {
		ret = scoutfs_server_hold_commit(sb);
		if (ret)
			goto out;

		le64_add_cpu(&super->unmount_barrier, 1);
		      cancel_srch_compact(sb, fw->rid) ?:
		      delete_mounted_client(sb, fw->rid);

		ret = scoutfs_server_apply_commit(sb, ret);
		if (ret)
@@ -1392,16 +1409,16 @@ static void farewell_worker(struct work_struct *work)

	ret = 0;
out:
	mutex_lock(&server->farewell_mutex);
	spin_lock(&server->farewell_lock);
	more_reqs = !list_empty(&server->farewell_requests);
	list_splice_init(&reqs, &server->farewell_requests);
	list_splice_init(&send, &server->farewell_requests);
	mutex_unlock(&server->farewell_mutex);
	spin_unlock(&server->farewell_lock);

	if (ret < 0)
		stop_server(server);
	else if (more_reqs && !server->shutting_down)
		queue_work(server->wq, &server->farewell_work);
	else if (more_reqs)
		queue_farewell_work(server);
}

static void free_farewell_requests(struct super_block *sb, u64 rid)
@@ -1409,15 +1426,17 @@ static void free_farewell_requests(struct super_block *sb, u64 rid)
	struct server_info *server = SCOUTFS_SB(sb)->server_info;
	struct farewell_request *tmp;
	struct farewell_request *fw;
	LIST_HEAD(rid_list);

	mutex_lock(&server->farewell_mutex);
	spin_lock(&server->farewell_lock);
	list_for_each_entry_safe(fw, tmp, &server->farewell_requests, entry) {
		if (rid == 0 || fw->rid == rid) {
			list_del_init(&fw->entry);
			kfree(fw);
		}
		if (rid == 0 || fw->rid == rid)
			list_move_tail(&fw->entry, &rid_list);
	}
	mutex_unlock(&server->farewell_mutex);
	spin_unlock(&server->farewell_lock);

	list_for_each_entry_safe(fw, tmp, &rid_list, entry)
		kfree(fw);
}

/*
@@ -1451,11 +1470,11 @@ static int server_farewell(struct super_block *sb,
	fw->rid = rid;
	fw->net_id = id;

	mutex_lock(&server->farewell_mutex);
	spin_lock(&server->farewell_lock);
	list_add_tail(&fw->entry, &server->farewell_requests);
	mutex_unlock(&server->farewell_mutex);
	spin_unlock(&server->farewell_lock);

	queue_work(server->wq, &server->farewell_work);
	queue_farewell_work(server);

	/* response will be sent later */
	return 0;
@@ -1520,18 +1539,17 @@ static void scoutfs_server_worker(struct work_struct *work)
	struct super_block *sb = server->sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	struct mount_options *opts = &sbi->opts;
	struct scoutfs_net_connection *conn = NULL;
	DECLARE_WAIT_QUEUE_HEAD(waitq);
	struct sockaddr_in sin;
	LIST_HEAD(conn_list);
	u64 max_vers;
	int ret;
	int err;

	trace_scoutfs_server_work_enter(sb, 0, 0);

	sin = server->listen_sin;

	scoutfs_quorum_slot_sin(super, opts->quorum_slot_nr, &sin);
	scoutfs_info(sb, "server setting up at "SIN_FMT, SIN_ARG(&sin));

	conn = scoutfs_net_alloc_conn(sb, server_notify_up, server_notify_down,
@@ -1551,9 +1569,6 @@ static void scoutfs_server_worker(struct work_struct *work)
		goto out;
	}

	if (ret)
		goto out;

	/* start up the server subsystems before accepting */
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
@@ -1593,19 +1608,6 @@ static void scoutfs_server_worker(struct work_struct *work)
	if (ret)
		goto shutdown;

	/*
	 * Write our address in the super before it's possible for net
	 * processing to start writing the super as part of
	 * transactions.  In theory clients could be trying to connect
	 * to our address without having seen it in the super (maybe
	 * they saw it a long time ago).
	 */
	scoutfs_addr_from_sin(&super->server_addr, &sin);
	super->quorum_server_term = cpu_to_le64(server->term);
	ret = scoutfs_write_super(sb, super);
	if (ret < 0)
		goto shutdown;

	/* start accepting connections and processing work */
	server->conn = conn;
	scoutfs_net_listen(sb, conn);
@@ -1618,39 +1620,28 @@ static void scoutfs_server_worker(struct work_struct *work)

shutdown:
	scoutfs_info(sb, "server shutting down at "SIN_FMT, SIN_ARG(&sin));
	/* wait for request processing */

	/* wait for farewell to finish sending messages */
	flush_work(&server->farewell_work);

	/* wait for requests to finish, no more requests */
	scoutfs_net_shutdown(sb, conn);
	/* wait for commit queued by request processing */
	flush_work(&server->commit_work);
	server->conn = NULL;

	/* wait for extra queues by requests, won't find waiters */
	flush_work(&server->commit_work);

	scoutfs_lock_server_destroy(sb);

out:
	scoutfs_quorum_clear_leader(sb);
	scoutfs_net_free_conn(sb, conn);

	/* let quorum know that we've shutdown */
	scoutfs_quorum_server_shutdown(sb);

	scoutfs_info(sb, "server stopped at "SIN_FMT, SIN_ARG(&sin));
	trace_scoutfs_server_work_exit(sb, 0, ret);

	/*
	 * Always try to clear our presence in the super so that we're
	 * not fenced.  We do this last because other mounts will try to
	 * reach quorum the moment they see zero here.  The later we do
	 * this the longer we have to finish shutdown while clients
	 * timeout.
	 */
	err = scoutfs_read_super(sb, super);
	if (err == 0) {
		super->quorum_fenced_term = cpu_to_le64(server->term);
		memset(&super->server_addr, 0, sizeof(super->server_addr));
		err = scoutfs_write_super(sb, super);
	}
	if (err < 0) {
		scoutfs_err(sb, "failed to clear election term %llu at "SIN_FMT", this mount could be fenced",
			    server->term, SIN_ARG(&sin));
	}

	server->err = ret;
	complete(&server->start_comp);
}
@@ -1660,14 +1651,12 @@ out:
 * the super block's fence_term has been set to the new server's term so
 * that it won't be fenced.
 */
int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
			 u64 term)
int scoutfs_server_start(struct super_block *sb, u64 term)
{
	DECLARE_SERVER_INFO(sb, server);

	server->err = 0;
	server->shutting_down = false;
	server->listen_sin = *sin;
	server->term = term;
	init_completion(&server->start_comp);

@@ -1696,8 +1685,9 @@ void scoutfs_server_stop(struct super_block *sb)
	DECLARE_SERVER_INFO(sb, server);

	stop_server(server);
	/* XXX not sure both are needed */

	cancel_work_sync(&server->work);
	cancel_work_sync(&server->farewell_work);
	cancel_work_sync(&server->commit_work);
}

@@ -1719,12 +1709,13 @@ int scoutfs_server_setup(struct super_block *sb)
	INIT_WORK(&server->commit_work, scoutfs_server_commit_func);
	init_rwsem(&server->seq_rwsem);
	INIT_LIST_HEAD(&server->clients);
	mutex_init(&server->farewell_mutex);
	spin_lock_init(&server->farewell_lock);
	INIT_LIST_HEAD(&server->farewell_requests);
	INIT_WORK(&server->farewell_work, farewell_worker);
	mutex_init(&server->alloc_mutex);
	mutex_init(&server->logs_mutex);
	mutex_init(&server->srch_mutex);
	mutex_init(&server->mounted_clients_mutex);
	seqcount_init(&server->roots_seqcount);

	server->wq = alloc_workqueue("scoutfs_server",
@@ -1752,11 +1743,12 @@ void scoutfs_server_destroy(struct super_block *sb)

	/* wait for server work to wait for everything to shut down */
	cancel_work_sync(&server->work);
	/* farewell work triggers commits */
	cancel_work_sync(&server->farewell_work);
	/* recv work/compaction could have left commit_work queued */
	cancel_work_sync(&server->commit_work);

	/* pending farewell requests are another server's problem */
	cancel_work_sync(&server->farewell_work);
	free_farewell_requests(sb, 0);

	trace_scoutfs_server_workqueue_destroy(sb, 0, 0);

@@ -69,8 +69,7 @@ int scoutfs_server_apply_commit(struct super_block *sb, int err);

struct sockaddr_in;
struct scoutfs_quorum_elected_info;
int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
			 u64 term);
int scoutfs_server_start(struct super_block *sb, u64 term);
void scoutfs_server_abort(struct super_block *sb);
void scoutfs_server_stop(struct super_block *sb);

181	kmod/src/srch.c
@@ -255,24 +255,9 @@ static u8 height_for_blk(u64 blk)
	return hei;
}

static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
			    int level)
static inline u32 srch_level_magic(int level)
{
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_block_header *hdr;

	/* don't leak uninit kernel mem.. block should do this for us? */
	memset(bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);

	hdr = bl->data;
	hdr->fsid = super->hdr.fsid;
	hdr->blkno = cpu_to_le64(bl->blkno);
	prandom_bytes(&hdr->seq, sizeof(hdr->seq));

	if (level)
		hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_PARENT);
	else
		hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK);
	return level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT : SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
}

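Callers hand the per-level magic straight to the shared block layer, as the later hunks in this file show:

	ret = scoutfs_block_read_ref(sb, ref, srch_level_magic(level), &bl);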
/*
@@ -284,39 +269,15 @@ static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
 */
static int read_srch_block(struct super_block *sb,
			   struct scoutfs_block_writer *wri, int level,
			   struct scoutfs_srch_ref *ref,
			   struct scoutfs_block_ref *ref,
			   struct scoutfs_block **bl_ret)
{
	struct scoutfs_block *bl;
	int retries = 0;
	int ret = 0;
	int mag;
	u32 magic = srch_level_magic(level);
	int ret;

	mag = level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT :
		      SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
retry:
	bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
	if (!IS_ERR_OR_NULL(bl) &&
	    !scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno, mag)) {

		scoutfs_inc_counter(sb, srch_inconsistent_ref);
		scoutfs_block_writer_forget(sb, wri, bl);
		scoutfs_block_invalidate(sb, bl);
		scoutfs_block_put(sb, bl);
		bl = NULL;

		if (retries++ == 0)
			goto retry;

		bl = ERR_PTR(-ESTALE);
	ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
	if (ret == -ESTALE)
		scoutfs_inc_counter(sb, srch_read_stale);
	}
	if (IS_ERR(bl)) {
		ret = PTR_ERR(bl);
		bl = NULL;
	}

	*bl_ret = bl;
	return ret;
}

@@ -333,7 +294,7 @@ static int read_path_block(struct super_block *sb,
{
	struct scoutfs_block *bl = NULL;
	struct scoutfs_srch_parent *srp;
	struct scoutfs_srch_ref ref;
	struct scoutfs_block_ref ref;
	int level;
	int ind;
	int ret;
@@ -392,12 +353,10 @@ static int get_file_block(struct super_block *sb,
	struct scoutfs_block_header *hdr;
	struct scoutfs_block *bl = NULL;
	struct scoutfs_srch_parent *srp;
	struct scoutfs_block *new_bl;
	struct scoutfs_srch_ref *ref;
	u64 blkno = 0;
	struct scoutfs_block_ref new_root_ref;
	struct scoutfs_block_ref *ref;
	int level;
	int ind;
	int err;
	int ret;
	u8 hei;

@@ -409,29 +368,21 @@ static int get_file_block(struct super_block *sb,
		goto out;
	}

	ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
	memset(&new_root_ref, 0, sizeof(new_root_ref));
	level = sfl->height;

	ret = scoutfs_block_dirty_ref(sb, alloc, wri, &new_root_ref,
				      srch_level_magic(level), &bl, 0, NULL);
	if (ret < 0)
		goto out;

	bl = scoutfs_block_create(sb, blkno);
	if (IS_ERR(bl)) {
		ret = PTR_ERR(bl);
		goto out;
	}
	blkno = 0;

	scoutfs_block_writer_mark_dirty(sb, wri, bl);

	init_file_block(sb, bl, sfl->height);
	if (sfl->height) {
	if (level) {
		srp = bl->data;
		srp->refs[0].blkno = sfl->ref.blkno;
		srp->refs[0].seq = sfl->ref.seq;
		srp->refs[0] = sfl->ref;
	}

	hdr = bl->data;
	sfl->ref.blkno = hdr->blkno;
	sfl->ref.seq = hdr->seq;
	sfl->ref = new_root_ref;
	sfl->height++;
	scoutfs_block_put(sb, bl);
	bl = NULL;
@@ -447,54 +398,13 @@ static int get_file_block(struct super_block *sb,
		goto out;
	}

	/* read an existing block */
	if (ref->blkno) {
		ret = read_srch_block(sb, wri, level, ref, &bl);
		if (ret < 0)
			goto out;
	}

	/* allocate a new block if we need it */
	if (!ref->blkno || ((flags & GFB_DIRTY) &&
			    !scoutfs_block_writer_is_dirty(sb, bl))) {
		ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
		if (ret < 0)
			goto out;

		new_bl = scoutfs_block_create(sb, blkno);
		if (IS_ERR(new_bl)) {
			ret = PTR_ERR(new_bl);
			goto out;
		}

		if (bl) {
			/* cow old block if we have one */
			ret = scoutfs_free_meta(sb, alloc, wri,
						bl->blkno);
			if (ret)
				goto out;

			memcpy(new_bl->data, bl->data,
			       SCOUTFS_BLOCK_LG_SIZE);
			scoutfs_block_put(sb, bl);
			bl = new_bl;
			hdr = bl->data;
			hdr->blkno = cpu_to_le64(bl->blkno);
			prandom_bytes(&hdr->seq, sizeof(hdr->seq));
		} else {
			/* init new allocated block */
			bl = new_bl;
			init_file_block(sb, bl, level);
		}

		blkno = 0;
		scoutfs_block_writer_mark_dirty(sb, wri, bl);

		/* update file or parent block ref */
		hdr = bl->data;
		ref->blkno = hdr->blkno;
		ref->seq = hdr->seq;
	}
	if (flags & GFB_DIRTY)
		ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, srch_level_magic(level),
					      &bl, 0, NULL);
	else
		ret = scoutfs_block_read_ref(sb, ref, srch_level_magic(level), &bl);
	if (ret < 0)
		goto out;

	if (level == 0) {
		ret = 0;
@@ -514,12 +424,6 @@ static int get_file_block(struct super_block *sb,
out:
	scoutfs_block_put(sb, parent);

	/* return allocated blkno on error */
	if (blkno > 0) {
		err = scoutfs_free_meta(sb, alloc, wri, blkno);
		BUG_ON(err); /* radix should have been dirty */
	}

	if (ret < 0) {
		scoutfs_block_put(sb, bl);
		bl = NULL;
@@ -1198,14 +1102,10 @@ int scoutfs_srch_get_compact(struct super_block *sb,

	for (;;scoutfs_key_inc(&key)) {
		ret = scoutfs_btree_next(sb, root, &key, &iref);
		if (ret == -ENOENT) {
			ret = 0;
			sc->nr = 0;
			goto out;
		}

		if (ret == 0) {
			if (iref.val_len == sizeof(struct scoutfs_srch_file)) {
			if (iref.key->sk_type != type) {
				ret = -ENOENT;
			} else if (iref.val_len == sizeof(sfl)) {
				key = *iref.key;
				memcpy(&sfl, iref.val, iref.val_len);
			} else {
@@ -1213,24 +1113,25 @@ int scoutfs_srch_get_compact(struct super_block *sb,
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret < 0)
		if (ret < 0) {
			/* see if we ran out of log files or files entirely */
			if (ret == -ENOENT) {
				sc->nr = 0;
				if (type == SCOUTFS_SRCH_LOG_TYPE) {
					type = SCOUTFS_SRCH_BLOCKS_TYPE;
					init_srch_key(&key, type, 0, 0);
					continue;
				} else {
					ret = 0;
				}
			}
			goto out;
		}

		/* skip any files already being compacted */
		if (scoutfs_spbm_test(&busy, le64_to_cpu(sfl.ref.blkno)))
			continue;

		/* see if we ran out of log files or files entirely */
		if (key.sk_type != type) {
			sc->nr = 0;
			if (key.sk_type == SCOUTFS_SRCH_BLOCKS_TYPE) {
				type = SCOUTFS_SRCH_BLOCKS_TYPE;
			} else {
				ret = 0;
				goto out;
			}
		}

		/* reset if we iterated into the next size category */
		if (type == SCOUTFS_SRCH_BLOCKS_TYPE) {
			order = fls64(le64_to_cpu(sfl.blocks)) /
113	kmod/src/super.c
@@ -176,7 +176,8 @@ static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
	struct super_block *sb = root->d_sb;
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

	seq_printf(seq, ",server_addr="SIN_FMT, SIN_ARG(&opts->server_addr));
	if (opts->quorum_slot_nr >= 0)
		seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
	seq_printf(seq, ",metadev_path=%s", opts->metadev_path);

	return 0;
@@ -192,20 +193,19 @@ static ssize_t metadev_path_show(struct kobject *kobj,
}
SCOUTFS_ATTR_RO(metadev_path);

static ssize_t server_addr_show(struct kobject *kobj,
static ssize_t quorum_server_nr_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

	return snprintf(buf, PAGE_SIZE, SIN_FMT"\n",
			SIN_ARG(&opts->server_addr));
	return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
}
SCOUTFS_ATTR_RO(server_addr);
SCOUTFS_ATTR_RO(quorum_server_nr);

static struct attribute *mount_options_attrs[] = {
	SCOUTFS_ATTR_PTR(metadev_path),
	SCOUTFS_ATTR_PTR(server_addr),
	SCOUTFS_ATTR_PTR(quorum_server_nr),
	NULL,
};

@@ -257,15 +257,12 @@ static void scoutfs_put_super(struct super_block *sb)
	scoutfs_item_destroy(sb);
	scoutfs_forest_destroy(sb);

	/* the server locks the listen address and compacts */
	scoutfs_quorum_destroy(sb);
	scoutfs_lock_shutdown(sb);
	scoutfs_server_destroy(sb);
	scoutfs_net_destroy(sb);
	scoutfs_lock_destroy(sb);

	/* server clears quorum leader flag during shutdown */
	scoutfs_quorum_destroy(sb);

	scoutfs_block_destroy(sb);
	scoutfs_destroy_triggers(sb);
	scoutfs_options_destroy(sb);
@@ -309,6 +306,34 @@ int scoutfs_write_super(struct super_block *sb,
			    sizeof(struct scoutfs_super_block));
}

static bool invalid_blkno_limits(struct super_block *sb, char *which,
				 u64 start, __le64 first, __le64 last,
				 struct block_device *bdev, int shift)
{
	u64 blkno;

	if (le64_to_cpu(first) < start) {
		scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
			    which, le64_to_cpu(first), start);
		return true;
	}

	if (le64_to_cpu(first) > le64_to_cpu(last)) {
		scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
			    which, le64_to_cpu(first), which, le64_to_cpu(last));
		return true;
	}

	blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
	if (le64_to_cpu(last) > blkno) {
		scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
			    which, le64_to_cpu(last), blkno);
		return true;
	}

	return false;
}

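As a quick check of the device-size math above, the last addressable blkno is (device bytes >> shift) - 1. For example (illustrative values; SCOUTFS_BLOCK_LG_SHIFT is assumed to be 16 for 64 KiB blocks):

	/* 1 TiB metadata device: (1ULL << 40) >> 16 == 1 << 24 blocks */
	u64 last_valid = ((1ULL << 40) >> 16) - 1;	/* 16777215 */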
/*
 * Read super, specifying bdev.
 */
@@ -316,9 +341,9 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
					struct block_device *bdev,
					struct scoutfs_super_block *super_res)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super;
	__le32 calc;
	u64 blkno;
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -352,58 +377,27 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
	}

	if (super->format_hash != cpu_to_le64(SCOUTFS_FORMAT_HASH)) {
		scoutfs_err(sb, "super block has invalid format hash 0x%llx, expected 0x%llx",
			    le64_to_cpu(super->format_hash),
			    SCOUTFS_FORMAT_HASH);
	if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
		scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
			    le64_to_cpu(super->version),
			    SCOUTFS_INTEROP_VERSION);
		ret = -EINVAL;
		goto out;
	}

	/* XXX do we want more rigorous invalid super checking? */

	if (super->quorum_count == 0 ||
	    super->quorum_count > SCOUTFS_QUORUM_MAX_COUNT) {
		scoutfs_err(sb, "super block has invalid quorum count %u, must be > 0 and <= %u",
			    super->quorum_count, SCOUTFS_QUORUM_MAX_COUNT);
	if (invalid_blkno_limits(sb, "meta",
				 SCOUTFS_META_DEV_START_BLKNO,
				 super->first_meta_blkno,
				 super->last_meta_blkno, sbi->meta_bdev,
				 SCOUTFS_BLOCK_LG_SHIFT) ||
	    invalid_blkno_limits(sb, "data",
				 SCOUTFS_DATA_DEV_START_BLKNO,
				 super->first_data_blkno,
				 super->last_data_blkno, sb->s_bdev,
				 SCOUTFS_BLOCK_SM_SHIFT)) {
		ret = -EINVAL;
		goto out;
	}

	blkno = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
		SCOUTFS_BLOCK_SM_LG_SHIFT;
	if (le64_to_cpu(super->first_meta_blkno) < blkno) {
		scoutfs_err(sb, "super block first meta blkno %llu is within quorum blocks",
			    le64_to_cpu(super->first_meta_blkno));
		ret = -EINVAL;
		goto out;
	}

	if (le64_to_cpu(super->first_meta_blkno) >
	    le64_to_cpu(super->last_meta_blkno)) {
		scoutfs_err(sb, "super block first meta blkno %llu is greater than last meta blkno %llu",
			    le64_to_cpu(super->first_meta_blkno),
			    le64_to_cpu(super->last_meta_blkno));
		ret = -EINVAL;
		goto out;
	}

	if (le64_to_cpu(super->first_data_blkno) >
	    le64_to_cpu(super->last_data_blkno)) {
		scoutfs_err(sb, "super block first data blkno %llu is greater than last data blkno %llu",
			    le64_to_cpu(super->first_data_blkno),
			    le64_to_cpu(super->last_data_blkno));
		ret = -EINVAL;
		goto out;
	}

	blkno = (i_size_read(sb->s_bdev->bd_inode) >>
		 SCOUTFS_BLOCK_SM_SHIFT) - 1;
	if (le64_to_cpu(super->last_data_blkno) > blkno) {
		scoutfs_err(sb, "super block last data blkno %llu is outside device size last blkno %llu",
			    le64_to_cpu(super->last_data_blkno), blkno);
		ret = -EINVAL;
		goto out;
	}

out:
@@ -599,8 +593,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	       scoutfs_setup_trans(sb) ?:
	       scoutfs_lock_setup(sb) ?:
	       scoutfs_net_setup(sb) ?:
	       scoutfs_quorum_setup(sb) ?:
	       scoutfs_server_setup(sb) ?:
	       scoutfs_quorum_setup(sb) ?:
	       scoutfs_client_setup(sb) ?:
	       scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
				&sbi->rid_lock) ?:
@@ -682,6 +676,10 @@ static int __init scoutfs_module_init(void)
		".section .note.git_describe,\"a\"\n"
		".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
		".previous\n");
	__asm__ __volatile__ (
		".section .note.scoutfs_interop_version,\"a\"\n"
		".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
		".previous\n");

	scoutfs_init_counters();

@@ -714,3 +712,4 @@ module_exit(scoutfs_module_exit)
MODULE_AUTHOR("Zach Brown <zab@versity.com>");
MODULE_LICENSE("GPL");
MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);

362	kmod/src/trans.c
@@ -39,17 +39,15 @@
 * track the relationships between dirty blocks so there's only ever one
 * transaction being built.
 *
 * The copy of the on-disk super block in the fs sb info has its header
 * sequence advanced so that new dirty blocks inherit this dirty
 * sequence number.  It's only advanced once all those dirty blocks are
 * reachable after having first written them all out and then the new
 * super with that seq.  It's first incremented at mount.
 * Committing the current dirty transaction can be triggered by sync, a
 * regular background commit interval, reaching a dirty block threshold,
 * or the transaction running out of its private allocator resources.
 * Once all the current holders release, the writing func writes out the
 * dirty blocks while excluding holders until it finishes.
 *
 * Unfortunately writers can nest.  We don't bother trying to special
 * case holding a transaction that you're already holding because that
 * requires per-task storage.  We just let anyone hold transactions
 * regardless of waiters waiting to write, which risks waiters waiting a
 * very long time.
 * Unfortunately writing holders can nest.  We track nested hold callers
 * with the per-task journal_info pointer to avoid deadlocks between
 * holders that might otherwise wait for a pending commit.
 */

/* sync dirty data at least this often */
@@ -59,11 +57,7 @@
 * XXX move the rest of the super trans_ fields here.
 */
struct trans_info {
	spinlock_t lock;
	unsigned reserved_items;
	unsigned reserved_vals;
	unsigned holders;
	bool writing;
	atomic_t holders;

	struct scoutfs_log_trees lt;
	struct scoutfs_alloc alloc;
@@ -73,17 +67,9 @@
#define DECLARE_TRANS_INFO(sb, name) \
	struct trans_info *name = SCOUTFS_SB(sb)->trans_info

static bool drained_holders(struct trans_info *tri)
{
	bool drained;

	spin_lock(&tri->lock);
	tri->writing = true;
	drained = tri->holders == 0;
	spin_unlock(&tri->lock);

	return drained;
}
/* avoid the high sign bit out of an abundance of caution */
#define TRANS_HOLDERS_WRITE_FUNC_BIT (1 << 30)
#define TRANS_HOLDERS_COUNT_MASK (TRANS_HOLDERS_WRITE_FUNC_BIT - 1)

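The single atomic now carries both roles: bit 30 flags the presence of the writing func and the low bits count holders, so one read samples both. A sketch of the decoding:

	int v = atomic_read(&tri->holders);
	bool writer_present = (v & TRANS_HOLDERS_WRITE_FUNC_BIT) != 0;
	int nr_holders = v & TRANS_HOLDERS_COUNT_MASK;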
static int commit_btrees(struct super_block *sb)
{
@@ -128,6 +114,36 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
	return scoutfs_block_writer_has_dirty(sb, &tri->wri);
}

/*
 * This is racing with wait_event conditions, make sure our atomic
 * stores and waitqueue loads are ordered.
 */
static void sub_holders_and_wake(struct super_block *sb, int val)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);

	atomic_sub(val, &tri->holders);
	smp_mb(); /* make sure sub is visible before we wake */
	if (waitqueue_active(&sbi->trans_hold_wq))
		wake_up(&sbi->trans_hold_wq);
}

/*
 * called as a wait_event condition, needs to be careful to not change
 * task state and is racing with waking paths that sub_return, test, and
 * wake.
 */
static bool drained_holders(struct trans_info *tri)
{
	int holders;

	smp_mb(); /* make sure task in wait_event queue before atomic read */
	holders = atomic_read(&tri->holders) & TRANS_HOLDERS_COUNT_MASK;

	return holders == 0;
}

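The smp_mb() calls in the two functions above pair across the waiter and the waker; condensed, the ordering they guarantee looks like this:

	/*
	 *   write func (waiter)                 releasing holder (waker)
	 *   add task to trans_hold_wq           atomic_sub(val, &tri->holders)
	 *   smp_mb()                            smp_mb()
	 *   atomic_read(&tri->holders) == 0?    waitqueue_active()? wake_up()
	 */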
/*
 * This work func is responsible for writing out all the dirty blocks
 * that make up the current dirty transaction.  It prevents writers from
@@ -164,6 +180,9 @@ void scoutfs_trans_write_func(struct work_struct *work)

	sbi->trans_task = current;

	/* mark that we're writing so holders wait for us to finish and clear our bit */
	atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);

	wait_event(sbi->trans_hold_wq, drained_holders(tri));

	trace_scoutfs_trans_write_func(sb,
@@ -215,11 +234,8 @@ out:
	spin_unlock(&sbi->trans_write_lock);
	wake_up(&sbi->trans_write_wq);

	spin_lock(&tri->lock);
	tri->writing = false;
	spin_unlock(&tri->lock);

	wake_up(&sbi->trans_hold_wq);
	/* we're done, wake waiting holders */
	sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);

	sbi->trans_task = NULL;

@@ -311,64 +327,83 @@ void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
}

/*
 * Each thread reserves space in the segment for their dirty items while
 * they hold the transaction.  This is calculated before the first
 * transaction hold is acquired.  It includes all the potential nested
 * item manipulation that could happen with the transaction held.
 * Including nested holds avoids having to deal with writing out partial
 * transactions while a caller still holds the transaction.
 * We store nested holders in the lower bits of journal_info.  We use
 * some higher bits as a magic value to detect if something goes
 * horribly wrong and it gets clobbered.
 */
#define SCOUTFS_RESERVATION_MAGIC 0xd57cd13b
struct scoutfs_reservation {
	unsigned magic;
	unsigned holders;
	struct scoutfs_item_count reserved;
	struct scoutfs_item_count actual;
};
#define TRANS_JI_MAGIC		0xd5700000
#define TRANS_JI_MAGIC_MASK	0xfff00000
#define TRANS_JI_COUNT_MASK	0x000fffff

/* returns true if a caller already had a holder counted in journal_info */
static bool inc_journal_info_holders(void)
{
	unsigned long holders = (unsigned long)current->journal_info;

	WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));

	if (holders == 0)
		holders = TRANS_JI_MAGIC;
	holders++;

	current->journal_info = (void *)holders;
	return (holders > (TRANS_JI_MAGIC | 1));
}

static void dec_journal_info_holders(void)
{
	unsigned long holders = (unsigned long)current->journal_info;

	WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));
	WARN_ON_ONCE((holders & TRANS_JI_COUNT_MASK) == 0);

	holders--;
	if (holders == TRANS_JI_MAGIC)
		holders = 0;

	current->journal_info = (void *)holders;
}

/*
 * Try to hold the transaction.  If a caller already holds the trans then
 * we piggy back on their hold.  We wait if the writer is trying to
 * write out the transaction.  And if our items won't fit then we kick off
 * a write.
 * This is called as the wait_event condition for holding a transaction.
 * Increment the holder count unless the writer is present.  We return
 * false to wait until the writer finishes and wakes us.
 *
 * This is called as a condition for wait_event.  It is very limited in
 * the locking (blocking) it can do because the caller has set the task
 * state before testing the condition to safely race with waking after
 * setting the condition.  Our checking the amount of dirty metadata
 * blocks and free data blocks is racy, but we don't mind the risk of
 * delaying or prematurely forcing commits.
 * This can be racing with itself while there's no waiters.  We retry
 * the cmpxchg instead of returning and waiting.
 */
static bool acquired_hold(struct super_block *sb,
			  struct scoutfs_reservation *rsv,
			  const struct scoutfs_item_count *cnt)
static bool inc_holders_unless_writer(struct trans_info *tri)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);
	bool acquired = false;
	unsigned items;
	unsigned vals;
	int holders;

	spin_lock(&tri->lock);
	do {
		smp_mb(); /* make sure we read after wait puts task in queue */
		holders = atomic_read(&tri->holders);
		if (holders & TRANS_HOLDERS_WRITE_FUNC_BIT)
			return false;

	trace_scoutfs_trans_acquired_hold(sb, cnt, rsv, rsv->holders,
					  &rsv->reserved, &rsv->actual,
					  tri->holders, tri->writing,
					  tri->reserved_items,
					  tri->reserved_vals);
	} while (atomic_cmpxchg(&tri->holders, holders, holders + 1) != holders);

	/* use a caller's existing reservation */
	if (rsv->holders)
		goto hold;
	return true;
}

	/* wait until the writing thread is finished */
	if (tri->writing)
		goto out;

	/* see if we can reserve space for our item count */
	items = tri->reserved_items + cnt->items;
	vals = tri->reserved_vals + cnt->vals;
/*
 * As we drop the last trans holder we try to wake a writing thread that
 * was waiting for us to finish.
 */
static void release_holders(struct super_block *sb)
{
	dec_journal_info_holders();
	sub_holders_and_wake(sb, 1);
}

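Tracing a nested hold through the journal_info encoding above (values follow from the TRANS_JI_* constants):

	/*
	 *   no hold:       journal_info == NULL (0x00000000)
	 *   first hold:    0xd5700001  (magic | 1); inc returns false
	 *   nested hold:   0xd5700002; inc returns true, writer wait is skipped
	 *   inner release: 0xd5700001
	 *   outer release: count reaches the bare magic, journal_info reset to NULL
	 */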
/*
 * The caller has incremented holders so it is blocking commits.  We
 * make some quick checks to see if we need to trigger and wait for
 * another commit before proceeding.
 */
static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
{
	/*
	 * In theory each dirty item page could be straddling two full
	 * blocks, requiring 4 allocations for each item cache page.
@@ -378,11 +413,9 @@ static bool acquired_hold(struct super_block *sb,
	 * that it accounts for having to dirty parent blocks and
	 * whatever dirtying is done during the transaction hold.
	 */
	if (scoutfs_alloc_meta_low(sb, &tri->alloc,
				   scoutfs_item_dirty_pages(sb) * 2)) {
	if (scoutfs_alloc_meta_low(sb, &tri->alloc, scoutfs_item_dirty_pages(sb) * 2)) {
		scoutfs_inc_counter(sb, trans_commit_dirty_meta_full);
		queue_trans_work(sbi);
		goto out;
		return true;
	}

	/*
@@ -394,71 +427,74 @@ static bool acquired_hold(struct super_block *sb,
	 */
	if (scoutfs_alloc_meta_low(sb, &tri->alloc, 16)) {
		scoutfs_inc_counter(sb, trans_commit_meta_alloc_low);
		queue_trans_work(sbi);
		goto out;
		return true;
	}

	/* Try to refill data allocator before premature enospc */
	if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
		scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
		queue_trans_work(sbi);
		return true;
	}

	return false;
}

static bool acquired_hold(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);
	bool acquired;

	/* if a caller already has a hold we acquire unconditionally */
	if (inc_journal_info_holders()) {
		atomic_inc(&tri->holders);
		acquired = true;
		goto out;
	}

	tri->reserved_items = items;
	tri->reserved_vals = vals;
	/* wait if the writer is blocking holds */
	if (!inc_holders_unless_writer(tri)) {
		dec_journal_info_holders();
		acquired = false;
		goto out;
	}

	rsv->reserved.items = cnt->items;
	rsv->reserved.vals = cnt->vals;
	/* wait if we're triggering another commit */
	if (commit_before_hold(sb, tri)) {
		release_holders(sb);
		queue_trans_work(sbi);
		acquired = false;
		goto out;
	}

hold:
	rsv->holders++;
	tri->holders++;
	trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
	acquired = true;

out:
	spin_unlock(&tri->lock);

	return acquired;
}

int scoutfs_hold_trans(struct super_block *sb,
		       const struct scoutfs_item_count cnt)
/*
 * Try to hold the transaction.  Holding the transaction prevents it
 * from being committed.  If a transaction is currently being written
 * then we'll block until it's done and our hold can be granted.
 *
 * If a caller already holds the trans then we unconditionally acquire
 * our hold and return to avoid deadlocks with our caller, the writing
 * thread, and us.  We record nested holds in a call stack with the
 * journal_info pointer in the task_struct.
 *
 * The writing thread marks itself as a global trans_task which
 * short-circuits all the hold machinery so it can call code that would
 * otherwise try to hold transactions while it is writing.
 */
int scoutfs_hold_trans(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_reservation *rsv;
	int ret;

	/*
	 * Caller shouldn't provide garbage counts, nor counts that
	 * can't fit in segments by themselves.
	 */
	if (WARN_ON_ONCE(cnt.items <= 0 || cnt.vals < 0))
		return -EINVAL;

	if (current == sbi->trans_task)
		return 0;

	rsv = current->journal_info;
	if (rsv == NULL) {
		rsv = kzalloc(sizeof(struct scoutfs_reservation), GFP_NOFS);
		if (!rsv)
			return -ENOMEM;

		rsv->magic = SCOUTFS_RESERVATION_MAGIC;
		current->journal_info = rsv;
	}

	BUG_ON(rsv->magic != SCOUTFS_RESERVATION_MAGIC);

	ret = wait_event_interruptible(sbi->trans_hold_wq,
				       acquired_hold(sb, rsv, &cnt));
	if (ret && rsv->holders == 0) {
		current->journal_info = NULL;
		kfree(rsv);
	}
	return ret;
	return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
}

/*
@@ -468,86 +504,22 @@ int scoutfs_hold_trans(struct super_block *sb,
|
||||
*/
|
||||
bool scoutfs_trans_held(void)
|
||||
{
|
||||
struct scoutfs_reservation *rsv = current->journal_info;
|
||||
unsigned long holders = (unsigned long)current->journal_info;
|
||||
|
||||
return rsv && rsv->magic == SCOUTFS_RESERVATION_MAGIC;
|
||||
return (holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) == TRANS_JI_MAGIC));
|
||||
}
|
||||
|
||||
/*
|
||||
* Record a transaction holder's individual contribution to the dirty
|
||||
* items in the current transaction. We're making sure that the
|
||||
* reservation matches the possible item manipulations while they hold
|
||||
* the reservation.
|
||||
*
|
||||
* It is possible and legitimate for an individual contribution to be
|
||||
* negative if they delete dirty items. The item cache makes sure that
|
||||
* the total dirty item count doesn't fall below zero.
|
||||
*/
|
||||
void scoutfs_trans_track_item(struct super_block *sb, signed items,
|
||||
signed vals)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_reservation *rsv = current->journal_info;
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
return;
|
||||
|
||||
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
|
||||
|
||||
rsv->actual.items += items;
|
||||
rsv->actual.vals += vals;
|
||||
|
||||
trace_scoutfs_trans_track_item(sb, items, vals, rsv->actual.items,
|
||||
rsv->actual.vals, rsv->reserved.items,
|
||||
rsv->reserved.vals);
|
||||
|
||||
WARN_ON_ONCE(rsv->actual.items > rsv->reserved.items);
|
||||
WARN_ON_ONCE(rsv->actual.vals > rsv->reserved.vals);
|
||||
}
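
To make the sign convention concrete, a hedged sketch of a caller (the
surrounding item code is elided and hypothetical):

	/* created one dirty item with a val_len byte value */
	scoutfs_trans_track_item(sb, 1, val_len);

	/* later deleted a dirty item: a negative contribution is fine */
	scoutfs_trans_track_item(sb, -1, -val_len);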

/*
 * As we drop the last hold in the reservation we try and wake other
 * hold attempts that were waiting for space.  As we drop the last trans
 * holder we try to wake a writing thread that was waiting for us to
 * finish.
 */
void scoutfs_release_trans(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_reservation *rsv;
	DECLARE_TRANS_INFO(sb, tri);
	bool wake = false;

	if (current == sbi->trans_task)
		return;

	rsv = current->journal_info;
	BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
	release_holders(sb);

	spin_lock(&tri->lock);

	trace_scoutfs_release_trans(sb, rsv, rsv->holders, &rsv->reserved,
				    &rsv->actual, tri->holders, tri->writing,
				    tri->reserved_items, tri->reserved_vals);

	BUG_ON(rsv->holders <= 0);
	BUG_ON(tri->holders <= 0);

	if (--rsv->holders == 0) {
		tri->reserved_items -= rsv->reserved.items;
		tri->reserved_vals -= rsv->reserved.vals;
		current->journal_info = NULL;
		kfree(rsv);
		wake = true;
	}

	if (--tri->holders == 0)
		wake = true;

	spin_unlock(&tri->lock);

	if (wake)
		wake_up(&sbi->trans_hold_wq);
	trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
}

/*
@@ -576,7 +548,7 @@ int scoutfs_setup_trans(struct super_block *sb)
	if (!tri)
		return -ENOMEM;

	spin_lock_init(&tri->lock);
	atomic_set(&tri->holders, 0);
	scoutfs_block_writer_init(sb, &tri->wri);

	sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
@@ -6,21 +6,16 @@
/* the client will force commits if data allocators get too low */
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)

#include "count.h"

void scoutfs_trans_write_func(struct work_struct *work);
int scoutfs_trans_sync(struct super_block *sb, int wait);
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync);
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);

int scoutfs_hold_trans(struct super_block *sb,
		       const struct scoutfs_item_count cnt);
int scoutfs_hold_trans(struct super_block *sb);
bool scoutfs_trans_held(void);
void scoutfs_release_trans(struct super_block *sb);
u64 scoutfs_trans_sample_seq(struct super_block *sb);
void scoutfs_trans_track_item(struct super_block *sb, signed items,
			      signed vals);

int scoutfs_trans_get_log_trees(struct super_block *sb);
bool scoutfs_trans_has_dirty(struct super_block *sb);

@@ -38,10 +38,7 @@ struct scoutfs_triggers {
	struct scoutfs_triggers *name = SCOUTFS_SB(sb)->triggers

static char *names[] = {
	[SCOUTFS_TRIGGER_BTREE_STALE_READ] = "btree_stale_read",
	[SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF] = "btree_advance_ring_half",
	[SCOUTFS_TRIGGER_HARD_STALE_ERROR] = "hard_stale_error",
	[SCOUTFS_TRIGGER_SEG_STALE_READ] = "seg_stale_read",
	[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
	[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
};

@@ -2,10 +2,7 @@
#define _SCOUTFS_TRIGGERS_H_

enum scoutfs_trigger {
	SCOUTFS_TRIGGER_BTREE_STALE_READ,
	SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF,
	SCOUTFS_TRIGGER_HARD_STALE_ERROR,
	SCOUTFS_TRIGGER_SEG_STALE_READ,
	SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
	SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
	SCOUTFS_TRIGGER_NR,
};

@@ -577,10 +577,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
						SIC_XATTR_SET(found_parts,
							      value != NULL,
							      name_len, size));
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
	if (ret > 0)
		goto retry;
	if (ret)
@@ -781,7 +778,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
			    &tgs) != 0)
		memset(&tgs, 0, sizeof(tgs));

	ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
	ret = scoutfs_hold_trans(sb);
	if (ret < 0)
		break;
	release = true;

@@ -3,7 +3,7 @@
t_filter_fs()
{
	sed -e 's@mnt/test\.[0-9]*@mnt/test@g' \
	    -e 's@Device: [a-fA-F0-7]*h/[0-9]*d@Device: 0h/0d@g'
	    -e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
}

#
@@ -59,5 +59,8 @@ t_filter_dmesg()
	# some tests mount w/o options
	re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"

	# in debugging kernels we can slow things down a bit
	re="$re|hrtimer: interrupt took .*"

	egrep -v "($re)"
}

@@ -99,6 +99,19 @@ t_first_client_nr()
	t_fail "t_first_client_nr didn't find any clients"
}

#
# The number of quorum members needed to form a majority to start the
# server.
#
t_majority_count()
{
	if [ "$T_QUORUM" -lt 3 ]; then
		echo 1
	else
		echo $(((T_QUORUM / 2) + 1))
	fi
}
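
A quick check of the arithmetic with illustrative member counts: one or
two configured members need only one mount, while five members need
$(((5 / 2) + 1)) = 3:

	T_QUORUM=2 t_majority_count   # -> 1
	T_QUORUM=5 t_majority_count   # -> 3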

t_mount()
{
	local nr="$1"
@@ -116,7 +129,7 @@ t_umount()
	test "$nr" -lt "$T_NR_MOUNTS" || \
		t_fail "fs nr $nr invalid"

	eval t_quiet umount \$T_DB$i
	eval t_quiet umount \$T_M$i
}

#
@@ -196,12 +209,19 @@ t_trigger_show() {
	echo "trigger $which $string: $(t_trigger_get $which $nr)"
}

t_trigger_arm() {
t_trigger_arm_silent() {
	local which="$1"
	local nr="$2"
	local path=$(t_trigger_path "$nr")

	echo 1 > "$path/$which"
}

t_trigger_arm() {
	local which="$1"
	local nr="$2"

	t_trigger_arm_silent $which $nr
	t_trigger_show $which armed $nr
}

@@ -216,16 +236,44 @@ t_counter() {
	cat "$(t_sysfs_path $nr)/counters/$which"
}

#
# output the difference between the current value of a counter and the
# caller's provided previous value.
#
t_counter_diff_value() {
	local which="$1"
	local old="$2"
	local nr="$3"
	local new="$(t_counter $which $nr)"

	echo "$((new - old))"
}

#
# output the value of the given counter for the given mount, defaulting
# to mount 0 if a mount isn't specified.
# to mount 0 if a mount isn't specified.  For tests which expect a
# specific difference in counters.
#
t_counter_diff() {
	local which="$1"
	local old="$2"
	local nr="$3"
	local new

	new="$(t_counter $which $nr)"
	echo "counter $which diff $((new - old))"
	echo "counter $which diff $(t_counter_diff_value $which $old $nr)"
}

#
# output a message indicating whether or not the counter value changed.
# For tests that expect a difference, or not, but the amount of
# difference isn't significant.
#
t_counter_diff_changed() {
	local which="$1"
	local old="$2"
	local nr="$3"
	local diff="$(t_counter_diff_value $which $old $nr)"

	test "$diff" -eq 0 && \
		echo "counter $which didn't change" ||
		echo "counter $which changed"
}
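
Taken together, a test uses these helpers in a before/after pattern.  A
hedged sketch, with an illustrative counter and trigger on mount 0:

	old=$(t_counter block_cache_remove_stale 0)
	t_trigger_arm_silent block_remove_stale 0
	# ... perform the operation that should hit the armed path ...
	t_counter_diff_changed block_cache_remove_stale $old 0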

@@ -23,3 +23,18 @@ t_require_mounts() {
	test "$T_NR_MOUNTS" -ge "$req" || \
		t_skip "$req mounts required, only have $T_NR_MOUNTS"
}

#
# Require that the meta device be at least the size string argument, as
# parsed by numfmt using single char base 2 suffixes (iec), e.g. 64G.
#
t_require_meta_size() {
	local dev="$T_META_DEVICE"
	local req_iec="$1"
	local req_bytes=$(numfmt --from=iec --to=none $req_iec)
	local dev_bytes=$(blockdev --getsize64 $dev)
	local dev_iec=$(numfmt --from=auto --to=iec $dev_bytes)

	test "$dev_bytes" -ge "$req_bytes" || \
		t_skip "$dev must be at least $req_iec, is $dev_iec"
}
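
A test that needs a large metadata device would call this before doing
any work; the size here is illustrative:

	t_require_meta_size 64G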

52
tests/golden/block-stale-reads
Normal file
@@ -0,0 +1,52 @@
== create shared test file
== set and get xattrs between mount pairs while retrying
# file: /mnt/test/test/block-stale-reads/file
user.xat="1"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="2"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="3"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="4"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="5"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="6"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="7"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="8"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="9"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="10"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed

3
tests/golden/mount-unmount-race
Normal file
@@ -0,0 +1,3 @@
== create per mount files
== 30s of racing random mount/umount
== mounting any unmounted
@@ -1,11 +0,0 @@
== create file for xattr ping pong
# file: /mnt/test/test/stale-btree-read/file
user.xat="initial"

== retry btree block read
trigger btree_stale_read armed: 1
# file: /mnt/test/test/stale-btree-read/file
user.xat="btree"

trigger btree_stale_read after: 0
counter btree_stale_read diff 1
@@ -52,16 +52,17 @@ $(basename $0) options:
           | the file system to be tested.  Will be clobbered by -m mkfs.
  -m       | Run mkfs on the device before mounting and running
           | tests.  Implies unmounting existing mounts first.
  -n       | The number of devices and mounts to test.
  -P       | Output trace events with printk as they're generated.
  -n <nr>  | The number of devices and mounts to test.
  -P       | Enable trace_printk.
  -p       | Exit script after preparing mounts only, don't run tests.
  -q <nr>  | Specify the quorum count needed to mount.  This is
           | used when running mkfs and is needed by a few tests.
  -q <nr>  | The first <nr> mounts will be quorum members.  Must be
           | at least 1 and no greater than -n number of mounts.
  -r <dir> | Specify the directory in which to store results of
           | test runs.  The directory will be created if it doesn't
           | exist.  Previous results will be deleted as each test runs.
  -s       | Skip git repo checkouts.
  -t       | Enable trace events that match the given glob argument.
           | Multiple options enable multiple globbed events.
  -X       | xfstests git repo.  Used by tests/xfstests.sh.
  -x       | xfstests git branch to checkout and track.
  -y       | xfstests ./check additional args

@@ -77,6 +78,9 @@ done
T_TRACE_DUMP="0"
T_TRACE_PRINTK="0"

# array declarations to be able to use array ops
declare -a T_TRACE_GLOB

while true; do
	case $1 in
	-a)
@@ -147,7 +151,7 @@ while true; do
		;;
	-t)
		test -n "$2" || die "-t must have trace glob argument"
		T_TRACE_GLOB="$2"
		T_TRACE_GLOB+=("$2")
		shift
		;;
	-X)
@@ -195,7 +199,6 @@ test -e "$T_EX_META_DEV" || die "extra meta device -f '$T_EX_META_DEV' doesn't e
test -n "$T_EX_DATA_DEV" || die "must specify -e extra data device"
test -e "$T_EX_DATA_DEV" || die "extra data device -e '$T_EX_DATA_DEV' doesn't exist"

test -n "$T_MKFS" -a -z "$T_QUORUM" && die "mkfs (-m) requires quorum (-q)"
test -n "$T_RESULTS" || die "must specify -r results dir"
test -n "$T_XFSTESTS_REPO" -a -z "$T_XFSTESTS_BRANCH" -a -z "$T_SKIP_CHECKOUT" && \
	die "-X xfstests repo requires -x xfstests branch"
@@ -205,6 +208,12 @@ test -n "$T_XFSTESTS_BRANCH" -a -z "$T_XFSTESTS_REPO" -a -z "$T_SKIP_CHECKOUT" &
test -n "$T_NR_MOUNTS" || die "must specify -n nr mounts"
test "$T_NR_MOUNTS" -ge 1 -a "$T_NR_MOUNTS" -le 8 || \
	die "-n nr mounts must be >= 1 and <= 8"
test -n "$T_QUORUM" || \
	die "must specify -q number of mounts that are quorum members"
test "$T_QUORUM" -ge "1" || \
	die "-q quorum members must be at least 1"
test "$T_QUORUM" -le "$T_NR_MOUNTS" || \
	die "-q quorum members must not be greater than -n mounts"

# top level paths
T_KMOD=$(realpath "$(dirname $0)/../kmod")
@@ -303,8 +312,14 @@ if [ -n "$T_UNMOUNT" ]; then
	unmount_all
fi

quo=""
if [ -n "$T_MKFS" ]; then
	cmd scoutfs mkfs -Q "$T_QUORUM" "$T_META_DEVICE" "$T_DATA_DEVICE" -f
	for i in $(seq 0 $((T_QUORUM - 1))); do
		quo="$quo -Q $i,127.0.0.1,$((42000 + i))"
	done

	msg "making new filesystem with $T_QUORUM quorum members"
	cmd scoutfs mkfs -f $quo "$T_META_DEVICE" "$T_DATA_DEVICE"
fi

if [ -n "$T_INSMOD" ]; then
|
||||
@@ -314,23 +329,37 @@ if [ -n "$T_INSMOD" ]; then
|
||||
cmd insmod "$T_KMOD/src/scoutfs.ko"
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_GLOB" ]; then
|
||||
msg "enabling trace events"
|
||||
nr_globs=${#T_TRACE_GLOB[@]}
|
||||
if [ $nr_globs -gt 0 ]; then
|
||||
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
|
||||
for g in $T_TRACE_GLOB; do
|
||||
|
||||
for g in "${T_TRACE_GLOB[@]}"; do
|
||||
for e in /sys/kernel/debug/tracing/events/scoutfs/$g/enable; do
|
||||
echo 1 > $e
|
||||
if test -w "$e"; then
|
||||
echo 1 > "$e"
|
||||
else
|
||||
die "-t glob '$g' matched no scoutfs events"
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
echo "$T_TRACE_DUMP" > /proc/sys/kernel/ftrace_dump_on_oops
|
||||
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
|
||||
|
||||
cmd cat /sys/kernel/debug/tracing/set_event
|
||||
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
|
||||
/proc/sys/kernel/ftrace_dump_on_oops
|
||||
nr_events=$(cat /sys/kernel/debug/tracing/set_event | wc -l)
|
||||
msg "enabled $nr_events trace events from $nr_globs -t globs"
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_PRINTK" ]; then
|
||||
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_DUMP" ]; then
|
||||
echo "$T_TRACE_DUMP" > /proc/sys/kernel/ftrace_dump_on_oops
|
||||
fi
|
||||
|
||||
# always describe tracing in the logs
|
||||
cmd cat /sys/kernel/debug/tracing/set_event
|
||||
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
|
||||
/proc/sys/kernel/ftrace_dump_on_oops
|
||||
|
||||
#
|
||||
# mount concurrently so that a quorum is present to elect the leader and
|
||||
# start a server.
|
||||
@@ -347,8 +376,12 @@ for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="/mnt/test.$i"
|
||||
test -d "$dir" || cmd mkdir -p "$dir"
|
||||
|
||||
opts="-o metadev_path=$meta_dev"
|
||||
if [ "$i" -lt "$T_QUORUM" ]; then
|
||||
opts="$opts,quorum_slot_nr=$i"
|
||||
fi
|
||||
|
||||
msg "mounting $meta_dev|$data_dev on $dir"
|
||||
opts="-o server_addr=127.0.0.1,metadev_path=$meta_dev"
|
||||
cmd mount -t scoutfs $opts "$data_dev" "$dir" &
|
||||
|
||||
p="$!"
|
||||
@@ -434,7 +467,7 @@ for t in $tests; do
|
||||
|
||||
# get stats from previous pass
|
||||
last="$T_RESULTS/last-passed-test-stats"
|
||||
stats=$(grep -s "^$test_name" "$last" | cut -d " " -f 2-)
|
||||
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
|
||||
test -n "$stats" && stats="last: $stats"
|
||||
|
||||
printf " %-30s $stats" "$test_name"
|
||||
@@ -497,7 +530,7 @@ for t in $tests; do
|
||||
echo " passed: $stats"
|
||||
((passed++))
|
||||
# save stats for passed test
|
||||
grep -s -v "^$test_name" "$last" > "$last.tmp"
|
||||
grep -s -v "^$test_name " "$last" > "$last.tmp"
|
||||
echo "$test_name $stats" >> "$last.tmp"
|
||||
mv -f "$last.tmp" "$last"
|
||||
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
|
||||
@@ -515,23 +548,24 @@ done
|
||||
|
||||
msg "all tests run: $passed passed, $skipped skipped, $failed failed"
|
||||
|
||||
unmount_all
|
||||
|
||||
if [ -n "$T_TRACE_GLOB" ]; then
|
||||
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
|
||||
msg "saving traces and disabling tracing"
|
||||
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
|
||||
echo 0 > /sys/kernel/debug/tracing/options/trace_printk
|
||||
cat /sys/kernel/debug/tracing/trace > "$T_RESULTS/traces"
|
||||
fi
|
||||
|
||||
if [ "$skipped" == 0 -a "$failed" == 0 ]; then
|
||||
msg "all tests passed"
|
||||
unmount_all
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$skipped" != 0 ]; then
|
||||
msg "$skipped tests skipped, check skip.log"
|
||||
msg "$skipped tests skipped, check skip.log, still mounted"
|
||||
fi
|
||||
if [ "$failed" != 0 ]; then
|
||||
msg "$failed tests failed, check fail.log"
|
||||
msg "$failed tests failed, check fail.log, still mounted"
|
||||
fi
|
||||
exit 1
|
||||
|
||||
@@ -28,5 +28,5 @@ setup-error-teardown.sh
mount-unmount-race.sh
createmany-parallel-mounts.sh
archive-light-cycle.sh
stale-btree-read.sh
block-stale-reads.sh
xfstests.sh

@@ -160,8 +160,8 @@ for i in $(seq 1 1); do
		mkdir -p $(dirname $lnk)
		ln "$T_D0/file" $lnk

		scoutfs ino-path $ino "$T_M0" > "$T_TMP.0"
		scoutfs ino-path $ino "$T_M1" > "$T_TMP.1"
		scoutfs ino-path -p "$T_M0" $ino > "$T_TMP.0"
		scoutfs ino-path -p "$T_M1" $ino > "$T_TMP.1"
		diff -u "$T_TMP.0" "$T_TMP.1"
	done
done

61
tests/tests/block-stale-reads.sh
Normal file
@@ -0,0 +1,61 @@
#
# Exercise stale block reading.
#
# It would be very difficult to manipulate the allocators, cache, and
# persistent blocks to create stable block reading scenarios.  Instead
# we use triggers to exercise how readers encounter stale blocks.
#

t_require_commands touch setfattr getfattr

inc_wrap_fs_nr()
{
	local nr="$(($1 + 1))"

	if [ "$nr" == "$T_NR_MOUNTS" ]; then
		nr=0
	fi

	echo $nr
}

GETFATTR="getfattr --absolute-names"
SETFATTR="setfattr"

echo "== create shared test file"
touch "$T_D0/file"
$SETFATTR -n user.xat -v 0 "$T_D0/file"

#
# Trigger retries in the block cache as we bounce xattr values around
# between sequential pairs of mounts.  This is a little silly because if
# either of the mounts are the server then they'll almost certainly have
# their trigger fired prematurely by message handling btree calls while
# working with the t_ helpers long before we work with the xattrs.  But
# the block cache stale retry path is still being exercised.
#
echo "== set and get xattrs between mount pairs while retrying"
set_nr=0
get_nr=$(inc_wrap_fs_nr $set_nr)

for i in $(seq 1 10); do
	eval set_file="\$T_D${set_nr}/file"
	eval get_file="\$T_D${get_nr}/file"

	old_set=$(t_counter block_cache_remove_stale $set_nr)
	old_get=$(t_counter block_cache_remove_stale $get_nr)

	t_trigger_arm_silent block_remove_stale $set_nr
	t_trigger_arm_silent block_remove_stale $get_nr

	$SETFATTR -n user.xat -v $i "$set_file"
	$GETFATTR -n user.xat "$get_file" 2>&1 | t_filter_fs

	t_counter_diff_changed block_cache_remove_stale $old_set $set_nr
	t_counter_diff_changed block_cache_remove_stale $old_get $get_nr

	set_nr="$get_nr"
	get_nr=$(inc_wrap_fs_nr $set_nr)
done

t_pass

@@ -50,7 +50,7 @@ for m in 0 1; do
done
wait
CONF="$((SECONDS - START))"
echo "conf: $IND" >> $T_TMP.log
echo "conf: $CONF" >> $T_TMP.log

if [ "$CONF" -gt "$((IND * 5))" ]; then
	t_fail "conflicting $CONF secs is more than 5x independent $IND secs"

@@ -4,25 +4,23 @@
# At the start of the test all mounts are mounted.  Each iteration
# randomly decides to change each mount or to leave it alone.
#
# They create dirty items before unmounting to encourage compaction
# while unmounting
# Each iteration creates dirty items across the mounts randomly, giving
# unmount some work to do.
#
# For this test to be meaningful it needs multiple mounts beyond the
# quorum set which can be racing to mount and unmount.  A reasonable
# config would be 5 mounts with 3 quorum.  But the test will run with
# whatever count it finds.
# quorum majority which can be racing to mount and unmount.  A
# reasonable config would be 5 mounts with 3 quorum members.  But the
# test will run with whatever count it finds.
#
# This assumes that all the mounts are configured as voting servers.  We
# could update it to be more clever and know that it can always safely
# unmount mounts that aren't configured as servers.
# The test assumes that the first mounts are the quorum members.
#

# nothing to do if we can't unmount
test "$T_NR_MOUNTS" == "$T_QUORUM" && \
	t_skip "only quorum members mounted, can't unmount"
majority_nr=$(t_majority_count)
quorum_nr=$T_QUORUM

nr_mounted=$T_NR_MOUNTS
nr_quorum=$T_QUORUM
cur_quorum=$quorum_nr
test "$cur_quorum" == "$majority_nr" && \
	t_skip "all quorum members make up majority, need more mounts to unmount"

echo "== create per mount files"
for i in $(t_fs_nrs); do
@@ -55,25 +53,42 @@ while [ "$SECONDS" -lt "$END" ]; do
	fi

	if [ "${mounted[$i]}" == 1 ]; then
		if [ "$nr_mounted" -gt "$nr_quorum" ]; then
		#
		# can always unmount non-quorum mounts,
		# can only unmount quorum members beyond majority
		#
		if [ "$i" -ge "$quorum_nr" -o \
		     "$cur_quorum" -gt "$majority_nr" ]; then
			t_umount $i &
			pid=$!
			echo "umount $i pid $pid quo $cur_quorum" \
				>> $T_TMP.log
			pids="$pids $pid"
			mounted[$i]=0
			(( nr_mounted-- ))
			if [ "$i" -lt "$quorum_nr" ]; then
				(( cur_quorum-- ))
			fi
		fi
	else
		t_mount $i &
		pid=$!
		pids="$pids $pid"
		echo "mount $i pid $pid quo $cur_quorum" >> $T_TMP.log
		mounted[$i]=1
		(( nr_mounted++ ))
		if [ "$i" -lt "$quorum_nr" ]; then
			(( cur_quorum++ ))
		fi
	fi
done

echo "waiting (secs $SECONDS)" >> $T_TMP.log
for p in $pids; do
	t_quiet wait $p
	wait $p
	rc=$?
	if [ "$rc" != 0 ]; then
		echo "waiting for pid $p returned $rc"
		t_fail "background mount/umount returned error"
	fi
done
echo "done waiting (secs $SECONDS)" >> $T_TMP.log
done

@@ -1,40 +0,0 @@
#
# verify stale btree block reading
#

t_require_commands touch stat setfattr getfattr createmany
t_require_mounts 2

GETFATTR="getfattr --absolute-names"
SETFATTR="setfattr"

#
# This exercises the soft retry of btree blocks when
# inconsistent cached versions are found.  It ensures that basic hard
# error returning turns into EIO in the case where the persistent reread
# blocks and segments really are inconsistent.
#
# The triggers apply across all execution in the file system.  So to
# trigger btree block retries in the client we make sure that the server
# is running on the other node.
#

cl=$(t_first_client_nr)
sv=$(t_server_nr)
eval cl_dir="\$T_D${cl}"
eval sv_dir="\$T_D${sv}"

echo "== create file for xattr ping pong"
touch "$sv_dir/file"
$SETFATTR -n user.xat -v initial "$sv_dir/file"
$GETFATTR -n user.xat "$sv_dir/file" 2>&1 | t_filter_fs

echo "== retry btree block read"
$SETFATTR -n user.xat -v btree "$sv_dir/file"
t_trigger_arm btree_stale_read $cl
old=$(t_counter btree_stale_read $cl)
$GETFATTR -n user.xat "$cl_dir/file" 2>&1 | t_filter_fs
t_trigger_show btree_stale_read "after" $cl
t_counter_diff btree_stale_read $old $cl

t_pass
@@ -37,17 +37,25 @@ t_quiet make
t_quiet sync
# pwd stays in xfstests dir to build config and run

#
# Each filesystem needs specific mkfs and mount options because we put
# quorum member addresses in mkfs options and the metadata device in
# mount options.
#
cat << EOF > local.config
export FSTYP=scoutfs
export MKFS_OPTIONS="-Q 1"
export MKFS_OPTIONS="-f"
export MKFS_TEST_OPTIONS="-Q 0,127.0.0.1,42000"
export MKFS_SCRATCH_OPTIONS="-Q 0,127.0.0.1,43000"
export MKFS_DEV_OPTIONS="-Q 0,127.0.0.1,44000"
export TEST_DEV=$T_DB0
export TEST_DIR=$T_M0
export SCRATCH_META_DEV=$T_EX_META_DEV
export SCRATCH_DEV=$T_EX_DATA_DEV
export SCRATCH_MNT="$T_TMPDIR/mnt.scratch"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
EOF

cat << EOF > local.exclude
@@ -83,7 +91,7 @@ generic/375	# utils output change?  update branch?
EOF

t_restore_output
echo "(showing output of xfstests)"
echo "  (showing output of xfstests)"

args="-E local.exclude ${T_XFSTESTS_ARGS:--g quick}"
./check $args


@@ -1,23 +1,11 @@
#
# The userspace utils and kernel module share definitions of physical
# structures and ioctls.  If we're in the repo we include the kmod
# headers directly, and hash them directly to calculate the format hash.
#
# If we're creating a standalone tarball for distribution we copy the
# headers out of the kmod dir into the tarball.  And then when we're
# building in that tarball we use the headers in src/ directly.
#
FMTIOC_H := format.h ioctl.h
FMTIOC_DIST := $(addprefix src/,$(FMTIOC_H))
FMTIOC_KMOD := $(addprefix ../kmod/src/,$(FMTIOC_H))

ifneq ($(wildcard $(firstword $(FMTIOC_KMOD))),)
HASH_FILES := $(FMTIOC_KMOD)
else
HASH_FILES := $(FMTIOC_DIST)
endif
SCOUTFS_FORMAT_HASH := $(shell cat $(HASH_FILES) | md5sum | cut -b1-16)

CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -g -msse4.2 \
	-fno-strict-aliasing \
	-DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU

@@ -21,21 +21,19 @@ contains the filesystem's metadata.
.sp
This option is required.
.TP
.B server_addr=<ipv4:port>
The server_addr option indicates that this mount will participate in
quorum election to try and run a server for all the mounts of its
filesystem.  The option specifies the local TCP IPv4 address that the
mount's elected server will listen on for connections from all other
mounts of the filesystem.
.B quorum_slot_nr=<number>
The quorum_slot_nr option assigns a quorum member slot to the mount.
The mount will use the slot assignment to claim exclusive ownership of
the slot's configured address and an associated metadata device block.
Each slot number must be used by only one mount at any given time.
.sp
The IPv4 address must be specified as a dotted quad, name resolution is
not supported.  A specific port may be provided after a separating
colon.  If no port is specified then a random port will be chosen.  The
address will be used for the lifetime of the mount and can not be
changed.  The mount must be unmounted to specify a different address.
When a mount is assigned a quorum slot it becomes a quorum member and
will participate in the raft leader election process and could start
the server for the filesystem if it is elected leader.
.sp
If server_addr is not specified then the mount will read the filesystem
until it sees the address of an elected server to connect to.
The assigned number must match one of the slots defined with \-Q options
when the filesystem was created with mkfs.  If the number assigned
doesn't match a number created during mkfs then the mount will fail.
.SH FURTHER READING
A
.B scoutfs

@@ -32,7 +32,7 @@ A path within a ScoutFS filesystem.
.PD

.TP
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-count} NUM [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.sp
Initialize a new ScoutFS filesystem on the target devices. Since ScoutFS uses
separate block devices for its metadata and data storage, two are required.
@@ -57,18 +57,20 @@ a faster block device for the metadata device.
The path to the block device to be used for ScoutFS file data. If possible, use
a larger block device for the data device.
.TP
.B "-Q, --quorum-count NUM"
The number of mounts needed to reach quorum and elect one
to be the server.  Mounts of the filesystem will hang until a quorum of
mounts are operational.
.sp
Mounts with the
.B server_addr
mount option participate in quorum.  The safest quorum number is the
smallest majority of an odd number of participating mounts.  For example,
two out of three total mounts.  This ensures that there can only be one
set of mounts that can establish quorum.
.B "-Q, --quorum-slot NR,ADDR,PORT"
Each \-Q option configures a quorum slot.  The NR specifies the number
of the slot to configure which must be between 0 and 14.  Each slot
number must only be used once, but they can be used in any order and
they need not be consecutive.  This is to allow natural relationships
between slot numbers and nodes which may have arbitrary numbering
schemes.  ADDR and PORT are the numerical IPv4 address and port which
will be used as the UDP endpoint for leader elections and as the TCP
listening address for server connections.  The number of configured
slots determines the size of the quorum of member mounts which must be
present to start the server for the filesystem to operate.  A simple
majority is typically required, while one mount is sufficient if only
one or two slots are configured.  Until the majority quorum are present,
all mounts will hang waiting for a server to connect to.
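
A hedged example of the new invocation, pulled together from the options
documented above; device paths and addresses are placeholders:

	scoutfs mkfs -f \
		-Q 0,10.0.0.1,42000 \
		-Q 1,10.0.0.2,42000 \
		-Q 2,10.0.0.3,42000 \
		/dev/meta_dev /dev/data_dev

	# a quorum member then claims its slot at mount time
	mount -t scoutfs -o quorum_slot_nr=0,metadev_path=/dev/meta_dev \
		/dev/data_dev /mnt/scoutfs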
.TP
.B "-m, --max-meta-size SIZE"
Limit the space used by ScoutFS on the metadata device to the

@@ -25,17 +25,13 @@ static void init_block(struct scoutfs_btree_block *bt, int level)
 */
void btree_init_root_single(struct scoutfs_btree_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 seq, __le64 fsid)
			    u64 seq, u64 blkno)
{
	root->ref.blkno = cpu_to_le64(blkno);
	root->ref.seq = cpu_to_le64(1);
	root->ref.seq = cpu_to_le64(seq);
	root->height = 1;

	memset(bt, 0, SCOUTFS_BLOCK_LG_SIZE);
	bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
	bt->hdr.fsid = fsid;
	bt->hdr.blkno = cpu_to_le64(blkno);
	bt->hdr.seq = cpu_to_le64(1);

	init_block(bt, 0);
}

@@ -3,7 +3,7 @@

void btree_init_root_single(struct scoutfs_btree_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 seq, __le64 fsid);
			    u64 seq, u64 blkno);

void btree_append_item(struct scoutfs_btree_block *bt,
		       struct scoutfs_key *key, void *val, int val_len);

188
utils/src/mkfs.c
@@ -32,12 +32,22 @@
#include "leaf_item_hash.h"
#include "blkid.h"

static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
/*
 * Update the block header fields and write out the block.
 */
static int write_block(int fd, u32 magic, __le64 fsid, u64 seq, u64 blkno,
		       int shift, struct scoutfs_block_header *hdr)
{
	size_t size = 1ULL << shift;
	ssize_t ret;

	ret = pwrite(fd, blk, size, blkno << shift);
	hdr->magic = cpu_to_le32(magic);
	hdr->fsid = fsid;
	hdr->blkno = cpu_to_le64(blkno);
	hdr->seq = cpu_to_le64(seq);
	hdr->crc = cpu_to_le32(crc_block(hdr, size));

	ret = pwrite(fd, hdr, size, blkno << shift);
	if (ret != size) {
		fprintf(stderr, "write to blkno %llu returned %zd: %s (%d)\n",
			blkno, ret, strerror(errno), errno);
@@ -47,35 +57,18 @@ static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
	return 0;
}

/*
 * Update the block's header and write it out.
 */
static int write_block(int fd, u64 blkno, int shift,
		       struct scoutfs_super_block *super,
		       struct scoutfs_block_header *hdr)
{
	size_t size = 1ULL << shift;

	if (super)
		*hdr = super->hdr;
	hdr->blkno = cpu_to_le64(blkno);
	hdr->crc = cpu_to_le32(crc_block(hdr, size));

	return write_raw_block(fd, blkno, shift, hdr);
}

/*
 * Write the single btree block that contains the blkno and len indexed
 * items to store the given extent, and update the root to point to it.
 */
static int write_alloc_root(struct scoutfs_super_block *super, int fd,
static int write_alloc_root(int fd, __le64 fsid,
			    struct scoutfs_alloc_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 start, u64 len)
			    u64 seq, u64 blkno, u64 start, u64 len)
{
	struct scoutfs_key key;

	btree_init_root_single(&root->root, bt, blkno, 1, super->hdr.fsid);
	btree_init_root_single(&root->root, bt, seq, blkno);
	root->total_len = cpu_to_le64(len);

	memset(&key, 0, sizeof(key));
@@ -94,19 +87,18 @@ static int write_alloc_root(struct scoutfs_super_block *super, int fd,
	key.skfl_blkno = cpu_to_le64(start);
	btree_append_item(bt, &key, NULL, 0);

	bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
					    SCOUTFS_BLOCK_LG_SIZE));

	return write_raw_block(fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
	return write_block(fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, seq, blkno,
			   SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
}

struct mkfs_args {
	unsigned long long quorum_count;
	char *meta_device;
	char *data_device;
	unsigned long long max_meta_size;
	unsigned long long max_data_size;
	bool force;
	int nr_slots;
	struct scoutfs_quorum_slot slots[SCOUTFS_QUORUM_MAX_SLOTS];
};

/*
@@ -124,12 +116,14 @@ static int do_mkfs(struct mkfs_args *args)
	struct scoutfs_inode inode;
	struct scoutfs_alloc_list_block *lblk;
	struct scoutfs_btree_block *bt = NULL;
	struct scoutfs_block_header *hdr;
	struct scoutfs_key key;
	struct timeval tv;
	int meta_fd = -1;
	int data_fd = -1;
	char uuid_str[37];
	void *zeros = NULL;
	char *indent;
	u64 blkno;
	u64 meta_size;
	u64 data_size;
@@ -139,10 +133,12 @@ static int do_mkfs(struct mkfs_args *args)
	u64 last_data;
	u64 meta_start;
	u64 meta_len;
	__le64 fsid;
	int ret;
	int i;

	gettimeofday(&tv, NULL);
	pseudo_random_bytes(&fsid, sizeof(fsid));

	meta_fd = open(args->meta_device, O_RDWR | O_EXCL);
	if (meta_fd < 0) {
@@ -191,10 +187,7 @@ static int do_mkfs(struct mkfs_args *args)
	if (ret)
		goto out;

	/* metadata blocks start after the quorum blocks */
	next_meta = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
		    SCOUTFS_BLOCK_SM_LG_SHIFT;
	/* rest of meta dev is available for metadata blocks */
	next_meta = SCOUTFS_META_DEV_START_BLKNO;
	last_meta = (meta_size >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
	/* Data blocks go on the data dev */
	first_data = SCOUTFS_DATA_DEV_START_BLKNO;
@@ -202,10 +195,7 @@ static int do_mkfs(struct mkfs_args *args)

	/* partially initialize the super so we can use it to init others */
	memset(super, 0, SCOUTFS_BLOCK_SM_SIZE);
	pseudo_random_bytes(&super->hdr.fsid, sizeof(super->hdr.fsid));
	super->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SUPER);
	super->hdr.seq = cpu_to_le64(1);
	super->format_hash = cpu_to_le64(SCOUTFS_FORMAT_HASH);
	super->version = cpu_to_le64(SCOUTFS_INTEROP_VERSION);
	uuid_generate(super->uuid);
	super->next_ino = cpu_to_le64(SCOUTFS_ROOT_INO + 1);
	super->next_trans_seq = cpu_to_le64(1);
@@ -215,11 +205,14 @@ static int do_mkfs(struct mkfs_args *args)
	super->total_data_blocks = cpu_to_le64(last_data - first_data + 1);
	super->first_data_blkno = cpu_to_le64(first_data);
	super->last_data_blkno = cpu_to_le64(last_data);
	super->quorum_count = args->quorum_count;

	assert(sizeof(args->slots) ==
	       member_sizeof(struct scoutfs_super_block, qconf.slots));
	memcpy(super->qconf.slots, args->slots, sizeof(args->slots));

	/* fs root starts with root inode and its index items */
	blkno = next_meta++;
	btree_init_root_single(&super->fs_root, bt, blkno, 1, super->hdr.fsid);
	btree_init_root_single(&super->fs_root, bt, 1, blkno);

	memset(&key, 0, sizeof(key));
	key.sk_zone = SCOUTFS_INODE_INDEX_ZONE;
@@ -244,10 +237,8 @@ static int do_mkfs(struct mkfs_args *args)
	inode.mtime.nsec = inode.atime.nsec;
	btree_append_item(bt, &key, &inode, sizeof(inode));

	bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
					    SCOUTFS_BLOCK_LG_SIZE));

	ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
	ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, 1, blkno,
			  SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
	if (ret)
		goto out;

@@ -256,11 +247,6 @@ static int do_mkfs(struct mkfs_args *args)
	lblk = (void *)bt;
	memset(lblk, 0, SCOUTFS_BLOCK_LG_SIZE);

	lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
	lblk->hdr.fsid = super->hdr.fsid;
	lblk->hdr.blkno = cpu_to_le64(blkno);
	lblk->hdr.seq = cpu_to_le64(1);

	meta_len = (64 * 1024 * 1024) >> SCOUTFS_BLOCK_LG_SHIFT;
	for (i = 0; i < meta_len; i++) {
		lblk->blknos[i] = cpu_to_le64(next_meta);
@@ -268,20 +254,20 @@ static int do_mkfs(struct mkfs_args *args)
	}
	lblk->nr = cpu_to_le32(i);

	super->server_meta_avail[0].ref.blkno = lblk->hdr.blkno;
	super->server_meta_avail[0].ref.seq = lblk->hdr.seq;
	super->server_meta_avail[0].ref.blkno = cpu_to_le64(blkno);
	super->server_meta_avail[0].ref.seq = cpu_to_le64(1);
	super->server_meta_avail[0].total_nr = le32_to_le64(lblk->nr);
	super->server_meta_avail[0].first_nr = lblk->nr;

	lblk->hdr.crc = cpu_to_le32(crc_block(&bt->hdr, SCOUTFS_BLOCK_LG_SIZE));
	ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, lblk);
	ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, fsid, 1,
			  blkno, SCOUTFS_BLOCK_LG_SHIFT, &lblk->hdr);
	if (ret)
		goto out;

	/* the data allocator has a single extent */
	blkno = next_meta++;
	ret = write_alloc_root(super, meta_fd, &super->data_alloc, bt,
			       blkno, first_data,
	ret = write_alloc_root(meta_fd, fsid, &super->data_alloc, bt,
			       1, blkno, first_data,
			       le64_to_cpu(super->total_data_blocks));
	if (ret < 0)
		goto out;
@@ -298,8 +284,8 @@ static int do_mkfs(struct mkfs_args *args)
	/* each meta alloc root contains a portion of free metadata extents */
	for (i = 0; i < array_size(super->meta_alloc); i++) {
		blkno = next_meta++;
		ret = write_alloc_root(super, meta_fd, &super->meta_alloc[i], bt,
				       blkno, meta_start,
		ret = write_alloc_root(meta_fd, fsid, &super->meta_alloc[i], bt,
				       1, blkno, meta_start,
				       min(meta_len,
					   last_meta - meta_start + 1));
		if (ret < 0)
@@ -309,9 +295,11 @@ static int do_mkfs(struct mkfs_args *args)
	}

	/* zero out quorum blocks */
	hdr = zeros;
	for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
		ret = write_raw_block(meta_fd, SCOUTFS_QUORUM_BLKNO + i,
				      SCOUTFS_BLOCK_SM_SHIFT, zeros);
		ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_QUORUM, fsid,
				  1, SCOUTFS_QUORUM_BLKNO + i,
				  SCOUTFS_BLOCK_SM_SHIFT, hdr);
		if (ret < 0) {
			fprintf(stderr, "error zeroing quorum block: %s (%d)\n",
				strerror(-errno), -errno);
@@ -320,9 +308,9 @@ static int do_mkfs(struct mkfs_args *args)
	}

	/* write the super block to data dev and meta dev */
	super->hdr.seq = cpu_to_le64(1);
	ret = write_block(data_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  NULL, &super->hdr);
	ret = write_block(data_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid, 1,
			  SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  &super->hdr);
	if (ret)
		goto out;

@@ -334,8 +322,9 @@ static int do_mkfs(struct mkfs_args *args)
	}

	super->flags |= cpu_to_le64(SCOUTFS_FLAG_IS_META_BDEV);
	ret = write_block(meta_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  NULL, &super->hdr);
	ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid,
			  1, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  &super->hdr);
	if (ret)
		goto out;

@@ -352,21 +341,35 @@ static int do_mkfs(struct mkfs_args *args)
	       "  meta device path: %s\n"
	       "  data device path: %s\n"
	       "  fsid: %llx\n"
	       "  format hash: %llx\n"
	       "  version: %llx\n"
	       "  uuid: %s\n"
	       "  64KB metadata blocks: "SIZE_FMT"\n"
	       "  4KB data blocks: "SIZE_FMT"\n"
	       "  quorum count: %u\n",
	       "  quorum slots: ",
	       args->meta_device,
	       args->data_device,
	       le64_to_cpu(super->hdr.fsid),
	       le64_to_cpu(super->format_hash),
	       le64_to_cpu(super->version),
	       uuid_str,
	       SIZE_ARGS(le64_to_cpu(super->total_meta_blocks),
			 SCOUTFS_BLOCK_LG_SIZE),
	       SIZE_ARGS(le64_to_cpu(super->total_data_blocks),
			 SCOUTFS_BLOCK_SM_SIZE),
	       super->quorum_count);
			 SCOUTFS_BLOCK_SM_SIZE));

	indent = "";
	for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
		struct scoutfs_quorum_slot *sl = &super->qconf.slots[i];
		struct in_addr in;

		if (sl->addr.addr == 0)
			continue;

		in.s_addr = htonl(le32_to_cpu(sl->addr.addr));
		printf("%s%u: %s:%u", indent,
		       i, inet_ntoa(in), le16_to_cpu(sl->addr.port));
		indent = "\n ";
	}
	printf("\n");

	ret = 0;
out:
@@ -383,16 +386,55 @@ out:
	return ret;
}

static bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
{
	struct in_addr in;
	bool valid = true;
	char *addr;
	int i;
	int j;

	for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
		if (slots[i].addr.addr == 0)
			continue;

		for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
			if (slots[j].addr.addr == 0)
				continue;

			if (slots[i].addr.addr == slots[j].addr.addr &&
			    slots[i].addr.port == slots[j].addr.port) {

				in.s_addr =
					htonl(le32_to_cpu(slots[i].addr.addr));
				addr = inet_ntoa(in);
				fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
					i, j, addr,
					le16_to_cpu(slots[i].addr.port));
				valid = false;
			}
		}
	}

	return valid;
}

static int parse_opt(int key, char *arg, struct argp_state *state)
{
	struct mkfs_args *args = state->input;
	struct scoutfs_quorum_slot slot;
	int ret;

	switch (key) {
	case 'Q':
		ret = parse_u64(arg, &args->quorum_count);
		if (ret)
		ret = parse_quorum_slot(&slot, arg);
		if (ret < 0)
			return ret;
		if (args->slots[ret].addr.addr != 0)
			argp_error(state, "Quorum slot %u already specified before slot '%s'\n",
				   ret, arg);
		args->slots[ret] = slot;
		args->nr_slots++;
		break;
	case 'f':
		args->force = true;
@@ -432,12 +474,14 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
		argp_error(state, "more than two arguments given");
		break;
	case ARGP_KEY_FINI:
		if (!args->quorum_count)
			argp_error(state, "must provide nonzero quorum count with --quorum-count|-Q option");
		if (!args->nr_slots)
			argp_error(state, "must specify at least one quorum slot with --quorum-slot|-Q");
		if (!args->meta_device)
			argp_error(state, "no metadata device argument given");
		if (!args->data_device)
			argp_error(state, "no data device argument given");
		if (!valid_quorum_slots(args->slots))
			argp_error(state, "invalid quorum slot configuration");
		break;
	default:
		break;
@@ -447,7 +491,7 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
}

static struct argp_option options[] = {
	{ "quorum-count", 'Q', "NUM", 0, "Number of voters required to use the filesystem [Required]"},
	{ "quorum-slot", 'Q', "NR,ADDR,PORT", 0, "Specify quorum slot addresses [Required]"},
	{ "force", 'f', NULL, 0, "Overwrite existing data on block devices"},
	{ "max-meta-size", 'm', "SIZE", 0, "Use a size less than the base metadata device size (bytes or KMGTP units)"},
	{ "max-data-size", 'd', "SIZE", 0, "Use a size less than the base data device size (bytes or KMGTP units)"},
@@ -463,7 +507,7 @@ static struct argp argp = {

static int mkfs_cmd(int argc, char *argv[])
{
	struct mkfs_args mkfs_args = {0};
	struct mkfs_args mkfs_args = {NULL,};
	int ret;

	ret = argp_parse(&argp, argc, argv, 0, NULL, &mkfs_args);

@@ -3,6 +3,9 @@
#include <stdlib.h>
#include <limits.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include "sparse.h"
#include "util.h"
@@ -152,3 +155,65 @@ int parse_timespec(char *str, struct timespec *ts)

	return 0;
}

/*
 * Parse a quorum slot specification string "NR,ADDR,PORT" into its
 * component parts.  We use sscanf to both parse the leading NR and
 * trailing PORT integers, and to pull out the inner ADDR string which
 * is then parsed to make sure that it's a valid unicast ipv4 address.
 * We require that all components be specified, and sscanf will check
 * this by the number of matches it returns.
 */
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
{
#define ADDR_CHARS 45 /* max ipv6 */
	char addr[ADDR_CHARS + 1] = {'\0',};
	struct in_addr in;
	int port;
	int parsed;
	int nr;
	int ret;

	/* leading and trailing ints, an inner sized string without ',', all separated by ',' */
	ret = sscanf(arg, "%u,%"__stringify(ADDR_CHARS)"[^,],%u%n",
		     &nr, addr, &port, &parsed);
	if (ret == EOF) {
		printf("error parsing quorum slot '%s': %s\n",
		       arg, strerror(errno));
		return -EINVAL;
	}

	if (parsed != strlen(arg)) {
		printf("extra unparsed trailing characters in quorum slot '%s'\n",
		       arg);
		return -EINVAL;
	}

	if (ret != 3) {
		printf("failed to parse all three NR,ADDR,PORT tokens in quorum slot '%s'\n", arg);
		return -EINVAL;
	}

	if (nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
		printf("invalid nr '%d' in quorum slot '%s', must be between 0 and %u\n",
		       nr, arg, SCOUTFS_QUORUM_MAX_SLOTS - 1);
		return -EINVAL;
	}

	if (port <= 0 || port > USHRT_MAX) {
		printf("invalid ipv4 port '%u' in quorum slot '%s', must be between 1 and %u\n",
		       port, arg, USHRT_MAX);
		return -EINVAL;
	}

	if (inet_aton(addr, &in) == 0 || htonl(in.s_addr) == 0 ||
	    htonl(in.s_addr) == UINT_MAX) {
		printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
		       addr, arg);
		return -EINVAL;
	}

	slot->addr.addr = cpu_to_le32(htonl(in.s_addr));
	slot->addr.port = cpu_to_le16(port);
	return nr;
}
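
A hedged usage sketch; the literal argument is illustrative and the
slots table is the one declared in mkfs_args above:

	struct scoutfs_quorum_slot slot;
	int nr;

	nr = parse_quorum_slot(&slot, "2,10.0.0.3,42000");
	if (nr < 0)
		return nr;	/* invalid specification */
	args->slots[nr] = slot;	/* returned slot number indexes the table */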

@@ -4,11 +4,14 @@
#include <sys/time.h>
#include <argp.h>

struct scoutfs_quorum_slot;

int parse_human(char* str, u64 *val_ret);
int parse_u64(char *str, u64 *val_ret);
int parse_s64(char *str, s64 *val_ret);
int parse_u32(char *str, u32 *val_ret);
int parse_timespec(char *str, struct timespec *ts);
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg);

static inline char* strdup_or_error(const struct argp_state *state, char *str)
{

@@ -388,10 +388,10 @@ static int print_alloc_item(struct scoutfs_key *key, void *val,
|
||||
typedef int (*print_item_func)(struct scoutfs_key *key, void *val,
|
||||
unsigned val_len, void *arg);
|
||||
|
||||
static int print_btree_ref(struct scoutfs_key *key, void *val,
|
||||
static int print_block_ref(struct scoutfs_key *key, void *val,
|
||||
unsigned val_len, print_item_func func, void *arg)
|
||||
{
|
||||
struct scoutfs_btree_ref *ref = val;
|
||||
struct scoutfs_block_ref *ref = val;
|
||||
|
||||
func(key, NULL, 0, arg);
|
||||
printf(" ref blkno %llu seq %llu\n",
|
||||
@@ -433,7 +433,7 @@ static void print_leaf_item_hash(struct scoutfs_btree_block *bt)
|
||||
}
|
||||
|
||||
static int print_btree_block(int fd, struct scoutfs_super_block *super,
|
||||
char *which, struct scoutfs_btree_ref *ref,
|
||||
char *which, struct scoutfs_block_ref *ref,
|
||||
print_item_func func, void *arg, u8 level)
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
@@ -500,7 +500,7 @@ static int print_btree_block(int fd, struct scoutfs_super_block *super,
|
||||
val_len);
|
||||
|
||||
if (level)
|
||||
print_btree_ref(key, val, val_len, func, arg);
|
||||
print_block_ref(key, val, val_len, func, arg);
|
||||
else
|
||||
func(key, val, val_len, arg);
|
||||
}
|
||||
@@ -531,11 +531,10 @@ static int print_btree(int fd, struct scoutfs_super_block *super, char *which,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int print_alloc_list_block(int fd, char *str,
|
||||
struct scoutfs_alloc_list_ref *ref)
|
||||
static int print_alloc_list_block(int fd, char *str, struct scoutfs_block_ref *ref)
|
||||
{
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_alloc_list_ref next;
|
||||
struct scoutfs_block_ref next;
|
||||
u64 blkno;
|
||||
u64 start;
|
||||
u64 len;
|
||||
@@ -583,7 +582,7 @@ static int print_alloc_list_block(int fd, char *str,
|
||||
return print_alloc_list_block(fd, str, &next);
|
||||
}
|
||||
|
||||
static int print_srch_block(int fd, struct scoutfs_srch_ref *ref, int level)
|
||||
static int print_srch_block(int fd, struct scoutfs_block_ref *ref, int level)
|
||||
{
|
||||
struct scoutfs_srch_parent *srp;
|
||||
struct scoutfs_srch_block *srb;
|
||||
@@ -729,7 +728,7 @@ static int print_srch_root_files(struct scoutfs_key *key, void *val,
|
||||
}
|
||||
|
||||
static int print_btree_leaf_items(int fd, struct scoutfs_super_block *super,
|
||||
struct scoutfs_btree_ref *ref,
|
||||
struct scoutfs_block_ref *ref,
|
||||
print_item_func func, void *arg)
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
@@ -796,14 +795,25 @@ static char *alloc_addr_str(struct scoutfs_inet_addr *ia)
|
||||
return str;
|
||||
}
|
||||
|
||||
#define OFF_NAME(x) \
|
||||
{ offsetof(struct scoutfs_quorum_block, x), __stringify_1(x) }
|
||||
|
||||
static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
|
||||
{
|
||||
struct print_events {
|
||||
size_t offset;
|
||||
char *name;
|
||||
} events[] = {
|
||||
OFF_NAME(write), OFF_NAME(update_term), OFF_NAME(set_leader),
|
||||
OFF_NAME(clear_leader), OFF_NAME(fenced),
|
||||
};
|
||||
struct scoutfs_quorum_block *blk = NULL;
|
||||
struct scoutfs_quorum_block_event *ev;
|
||||
char *log_addr = NULL;
|
||||
u64 blkno;
|
||||
int ret;
|
||||
int i;
|
||||
int j;
|
||||
int e;
|
||||
|
||||
for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
|
||||
blkno = SCOUTFS_QUORUM_BLKNO + i;
|
||||
@@ -812,31 +822,21 @@ static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
 		if (ret)
 			goto out;
 
-		if (blk->voter_rid != 0) {
-			printf("quorum block blkno %llu\n"
-			       "  fsid %llx blkno %llu crc 0x%08x\n"
-			       "  term %llu write_nr %llu voter_rid %016llx "
-			       "vote_for_rid %016llx\n"
-			       "  log_nr %u\n",
-			       blkno, le64_to_cpu(blk->fsid),
-			       le64_to_cpu(blk->blkno), le32_to_cpu(blk->crc),
-			       le64_to_cpu(blk->term),
-			       le64_to_cpu(blk->write_nr),
-			       le64_to_cpu(blk->voter_rid),
-			       le64_to_cpu(blk->vote_for_rid),
-			       blk->log_nr);
-			for (j = 0; j < blk->log_nr; j++) {
-				free(log_addr);
-				log_addr = alloc_addr_str(&blk->log[j].addr);
-				if (!log_addr) {
-					ret = -ENOMEM;
-					goto out;
-				}
-				printf("    [%u]: term %llu rid %llu addr %s\n",
-				       j, le64_to_cpu(blk->log[j].term),
-				       le64_to_cpu(blk->log[j].rid),
-				       log_addr);
-			}
-		}
+		printf("quorum blkno %llu (slot %llu)\n",
+		       blkno, blkno - SCOUTFS_QUORUM_BLKNO);
+		print_block_header(&blk->hdr, SCOUTFS_BLOCK_SM_SIZE);
+		printf("  term %llu random_write_mark 0x%llx flags 0x%llx\n",
+		       le64_to_cpu(blk->term),
+		       le64_to_cpu(blk->random_write_mark),
+		       le64_to_cpu(blk->flags));
+
+		for (e = 0; e < array_size(events); e++) {
+			ev = (void *)blk + events[e].offset;
+
+			printf("  %12s: rid %016llx ts %llu.%08u\n",
+			       events[e].name, le64_to_cpu(ev->rid),
+			       le64_to_cpu(ev->ts.sec),
+			       le32_to_cpu(ev->ts.nsec));
+		}
 	}
 
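The `(void *)blk + events[e].offset` arithmetic above (a GCC extension; void pointer math behaves like char pointer math) recovers a pointer to each event from the table, letting one loop print five same-typed fields by name. A self-contained sketch of the technique, using hypothetical demo_* stand-ins and portable (char *) arithmetic:

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define demo_array_size(a) (sizeof(a) / sizeof((a)[0]))

struct demo_event {
	uint64_t rid;
	uint64_t sec;
};

struct demo_block {
	struct demo_event write;
	struct demo_event fenced;
};

#define DEMO_OFF_NAME(x) { offsetof(struct demo_block, x), #x }

static const struct {
	size_t offset;
	const char *name;
} demo_events[] = {
	DEMO_OFF_NAME(write), DEMO_OFF_NAME(fenced),
};

int main(void)
{
	struct demo_block blk = { { 1, 100 }, { 2, 200 } };
	size_t e;

	for (e = 0; e < demo_array_size(demo_events); e++) {
		/* same idea as the diff: base pointer plus table offset */
		struct demo_event *ev = (struct demo_event *)
			((char *)&blk + demo_events[e].offset);

		printf("%12s: rid %llu sec %llu\n", demo_events[e].name,
		       (unsigned long long)ev->rid,
		       (unsigned long long)ev->sec);
	}
	return 0;
}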
@@ -850,7 +850,8 @@ out:
 static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
 {
 	char uuid_str[37];
 	char *server_addr;
+	char *addr;
 	int i;
 
 	uuid_unparse(super->uuid, uuid_str);
@@ -860,20 +861,14 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
 
 	printf("super blkno %llu\n", blkno);
 	print_block_header(&super->hdr, SCOUTFS_BLOCK_SM_SIZE);
-	printf("  format_hash %llx uuid %s\n",
-	       le64_to_cpu(super->format_hash), uuid_str);
+	printf("  version %llx uuid %s\n",
+	       le64_to_cpu(super->version), uuid_str);
 	printf("  flags: 0x%016llx\n", le64_to_cpu(super->flags));
 
-	server_addr = alloc_addr_str(&super->server_addr);
-	if (!server_addr)
-		return;
-
 	/* XXX these are all in a crazy order */
 	printf("  next_ino %llu next_trans_seq %llu\n"
 	       "  total_meta_blocks %llu first_meta_blkno %llu last_meta_blkno %llu\n"
 	       "  total_data_blocks %llu first_data_blkno %llu last_data_blkno %llu\n"
-	       "  quorum_fenced_term %llu quorum_server_term %llu unmount_barrier %llu\n"
-	       "  quorum_count %u server_addr %s\n"
 	       "  meta_alloc[0]: "ALCROOT_F"\n"
 	       "  meta_alloc[1]: "ALCROOT_F"\n"
 	       "  data_alloc: "ALCROOT_F"\n"
@@ -894,11 +889,6 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
 	       le64_to_cpu(super->total_data_blocks),
 	       le64_to_cpu(super->first_data_blkno),
 	       le64_to_cpu(super->last_data_blkno),
-	       le64_to_cpu(super->quorum_fenced_term),
-	       le64_to_cpu(super->quorum_server_term),
-	       le64_to_cpu(super->unmount_barrier),
-	       super->quorum_count,
-	       server_addr,
 	       ALCROOT_A(&super->meta_alloc[0]),
 	       ALCROOT_A(&super->meta_alloc[1]),
 	       ALCROOT_A(&super->data_alloc),
@@ -922,7 +912,19 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
 	       le64_to_cpu(super->fs_root.ref.blkno),
 	       le64_to_cpu(super->fs_root.ref.seq));
 
-	free(server_addr);
+	printf("  quorum config version %llu\n",
+	       le64_to_cpu(super->qconf.version));
+	for (i = 0; i < array_size(super->qconf.slots); i++) {
+		if (!super->qconf.slots[i].addr.addr &&
+		    !super->qconf.slots[i].addr.port)
+			continue;
+
+		addr = alloc_addr_str(&super->qconf.slots[i].addr);
+		if (addr) {
+			printf("    quorum slot %2u: %s\n", i, addr);
+			free(addr);
+		}
+	}
 }
 
 struct print_args {
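The qconf loop above skips slots whose address and port are both zero and prints the rest with alloc_addr_str(), which returns a heap-allocated string the caller frees. Below is a hedged sketch of such a helper; the IPv4-in-a-32-bit-field layout, the port value, and the demo_* names are all assumptions, since this diff only shows that the addr struct has .addr and .port members.

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_inet_addr {
	uint32_t addr;	/* IPv4 address, host order in this demo */
	uint16_t port;
};

/* returns a malloc()ed "a.b.c.d:port" string, or NULL on failure */
static char *demo_addr_str(struct demo_inet_addr *ia)
{
	char *str;

	if (asprintf(&str, "%u.%u.%u.%u:%u",
		     (unsigned)(ia->addr >> 24) & 0xff,
		     (unsigned)(ia->addr >> 16) & 0xff,
		     (unsigned)(ia->addr >> 8) & 0xff,
		     (unsigned)ia->addr & 0xff,
		     (unsigned)ia->port) < 0)
		return NULL;

	return str;
}

int main(void)
{
	struct demo_inet_addr ia = { .addr = 0xc0a80101, .port = 7022 };
	char *s = demo_addr_str(&ia);

	if (s) {
		printf("%s\n", s);	/* 192.168.1.1:7022 */
		free(s);
	}
	return 0;
}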
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <string.h>
 #include <limits.h>
-#include <assert.h>
 #include <argp.h>
 
 #include "sparse.h"
@@ -208,9 +207,6 @@ static int do_release(struct release_args *args)
 		return ret;
 	}
 
-	assert(args->offset % SCOUTFS_BLOCK_SM_SIZE == 0);
-	assert(args->length % SCOUTFS_BLOCK_SM_SIZE == 0);
-
 	ioctl_args.offset = args->offset;
 	ioctl_args.length = args->length;
 	ioctl_args.data_version = args->data_version;
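The two hunks above drop <assert.h> and the block-size alignment asserts from do_release(). Since assert() compiles away entirely under NDEBUG, the usual replacement when a check still matters at runtime is an explicit error return; the sketch below shows that pattern purely as an illustration, with a DEMO_ stand-in constant. It is not what this commit does, which is simply to remove the checks.

#include <errno.h>
#include <stdio.h>

#define DEMO_BLOCK_SIZE 4096	/* stand-in for SCOUTFS_BLOCK_SM_SIZE */

/* runtime check that survives NDEBUG builds, unlike assert() */
static int demo_check_aligned(unsigned long long offset,
			      unsigned long long length)
{
	if (offset % DEMO_BLOCK_SIZE || length % DEMO_BLOCK_SIZE) {
		fprintf(stderr, "offset and length must be multiples of %d\n",
			DEMO_BLOCK_SIZE);
		return -EINVAL;
	}

	return 0;
}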
@@ -114,4 +114,7 @@ static inline int memcmp_lens(const void *a, int a_len,
 int get_path(char *path, int flags);
 int read_block(int fd, u64 blkno, int shift, void **ret_val);
 
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
 #endif
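This header hunk adds the classic two-level stringification pair. The `#` operator stringifies its argument literally, so a second macro layer is needed whenever the argument should be macro-expanded first; OFF_NAME in the print_quorum_blocks() hunk can use __stringify_1() directly because its argument is a plain field name. A small demonstration, with demo_* names standing in for the real macros:

#include <stdio.h>

#define demo_stringify_1(x) #x
#define demo_stringify(x) demo_stringify_1(x)

#define DEMO_BLOCKS 64

int main(void)
{
	/* one level: the argument is stringified before it can expand */
	printf("%s\n", demo_stringify_1(DEMO_BLOCKS));	/* "DEMO_BLOCKS" */
	/* two levels: DEMO_BLOCKS expands to 64 first */
	printf("%s\n", demo_stringify(DEMO_BLOCKS));	/* "64" */
	return 0;
}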