mirror of
https://github.com/versity/scoutfs.git
synced 2026-05-01 02:15:44 +00:00
Compare commits
124 Commits
v0.0.3
...
zab/block_
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c3290771a0 | ||
|
|
cf3cb3f197 | ||
|
|
cb4ed98b3c | ||
|
|
9ee7f7b9dc | ||
|
|
300791ecfa | ||
|
|
4630b77b45 | ||
|
|
bdc43ca634 | ||
|
|
6406f05350 | ||
|
|
820b7295f0 | ||
|
|
b3611103ee | ||
|
|
0deb232d3f | ||
|
|
1366e254f9 | ||
|
|
1259f899a3 | ||
|
|
2d393f435b | ||
|
|
09c879bcf1 | ||
|
|
3de703757f | ||
|
|
7d67489b0c | ||
|
|
73084462e9 | ||
|
|
8c81af2b9b | ||
|
|
efe5d92458 | ||
|
|
d39e56d953 | ||
|
|
5661a1fb02 | ||
|
|
12fa289399 | ||
|
|
75e8fab57c | ||
|
|
513d6b2734 | ||
|
|
f8d39610a2 | ||
|
|
c470c1c9f6 | ||
|
|
cad902b9cd | ||
|
|
e163f3b099 | ||
|
|
a508baae76 | ||
|
|
208c51d1d2 | ||
|
|
9450959ca4 | ||
|
|
6237f0adc5 | ||
|
|
f18fa0e97a | ||
|
|
0969a94bfc | ||
|
|
b1b75cbe9f | ||
|
|
0f14826ff8 | ||
|
|
336d521e44 | ||
|
|
4fab75b862 | ||
|
|
f6f72e7eae | ||
|
|
9878312b4d | ||
|
|
7421bd1861 | ||
|
|
1db6f8194d | ||
|
|
2de7692336 | ||
|
|
8c1d96898a | ||
|
|
090646aaeb | ||
|
|
d53350f9f1 | ||
|
|
57f34e90e9 | ||
|
|
79f6878355 | ||
|
|
740e13e53a | ||
|
|
dbb716f1bb | ||
|
|
87fcad5428 | ||
|
|
406d157891 | ||
|
|
8e34c5d66a | ||
|
|
1c7bbd6260 | ||
|
|
3ad18b0f3b | ||
|
|
79cd7a499b | ||
|
|
6ad18769cb | ||
|
|
49d82fcaaf | ||
|
|
e4e12c1968 | ||
|
|
15fd2ccc02 | ||
|
|
eea95357d3 | ||
|
|
9842c5d13e | ||
|
|
ade539217e | ||
|
|
5a90234c94 | ||
|
|
f81e4cb98a | ||
|
|
1fc706bf3f | ||
|
|
e9c3aa6501 | ||
|
|
d39268bbc1 | ||
|
|
35ed1a2438 | ||
|
|
32e7978a6e | ||
|
|
8123b8fc35 | ||
|
|
da5911c311 | ||
|
|
098fc420be | ||
|
|
7a96537210 | ||
|
|
0607dfdac8 | ||
|
|
0354bb64c5 | ||
|
|
631801c45c | ||
|
|
47a1ac92f7 | ||
|
|
004f693af3 | ||
|
|
f271a5d140 | ||
|
|
355eac79d2 | ||
|
|
d8b4e94854 | ||
|
|
bed33c7ffd | ||
|
|
b370730029 | ||
|
|
d64dd89ead | ||
|
|
8d81196e01 | ||
|
|
d731c1577e | ||
|
|
a421bb0884 | ||
|
|
773eb129ed | ||
|
|
eb3981c103 | ||
|
|
3139d3ea68 | ||
|
|
4da3d47601 | ||
|
|
aa1b1fa34f | ||
|
|
8fcc9095e6 | ||
|
|
299062a456 | ||
|
|
7cac1e7136 | ||
|
|
454dbebf59 | ||
|
|
2c5871c253 | ||
|
|
64a698aa93 | ||
|
|
d48b447e75 | ||
|
|
5241bba7f6 | ||
|
|
e0a2175c2e | ||
|
|
f2cd1003f6 | ||
|
|
97c6cc559e | ||
|
|
7c54c86c38 | ||
|
|
e1ba508301 | ||
|
|
f35154eb19 | ||
|
|
7befc61482 | ||
|
|
1383ca1a8d | ||
|
|
6b5ddf2b3a | ||
|
|
d025122fdd | ||
|
|
706fe9a30e | ||
|
|
0f17ecb9e3 | ||
|
|
fc003a5038 | ||
|
|
10df01eb7a | ||
|
|
68b8e4098d | ||
|
|
5701184324 | ||
|
|
a3035582d3 | ||
|
|
9e47a32257 | ||
|
|
b4592554af | ||
|
|
1e0f8ee27a | ||
|
|
511cb04330 | ||
|
|
807ae11ee9 |
45
README.md
45
README.md
@@ -31,15 +31,9 @@ functionality hasn't been implemented. It's appropriate for early
|
||||
adopters and interested developers, not for production use.
|
||||
|
||||
In that vein, expect significant incompatible changes to both the format
|
||||
of network messages and persistent structures. To avoid mistakes the
|
||||
implementation currently calculates a hash of the format and ioctl
|
||||
header files in the source tree. The kernel module will refuse to mount
|
||||
a volume created by userspace utilities with a mismatched hash, and it
|
||||
will refuse to connect to a remote node with a mismatched hash. This
|
||||
means having to unmount, mkfs, and remount everything across many
|
||||
functional changes. Once the format is nailed down we'll wire up
|
||||
forward and back compat machinery and remove this temporary safety
|
||||
measure.
|
||||
of network messages and persistent structures. Since the format hash-checking
|
||||
has now been removed in preparation for release, if there is any doubt, mkfs
|
||||
is strongly recommended.
|
||||
|
||||
The current kernel module is developed against the RHEL/CentOS 7.x
|
||||
kernel to minimize the friction of developing and testing with partners'
|
||||
@@ -71,8 +65,13 @@ The steps for getting scoutfs mounted and operational are:
|
||||
2. Make a new filesystem on the devices with the userspace utilities
|
||||
3. Mount the devices on all the nodes
|
||||
|
||||
In this example we run all of these commands on three nodes. The names
|
||||
of the block devices are the same on all the nodes.
|
||||
In this example we use three nodes. The names of the block devices are
|
||||
the same on all the nodes. Two of the nodes will be quorum members. A
|
||||
majority of quorum members must be mounted to elect a leader to run a
|
||||
server that all the mounts connect to. It should be noted that two
|
||||
quorum members results in a majority of one, each member itself, so
|
||||
split brain elections are possible but so unlikely that it's fine for a
|
||||
demonstration.
|
||||
|
||||
1. Get the Kernel Module and Userspace Binaries
|
||||
|
||||
@@ -94,24 +93,30 @@ of the block devices are the same on all the nodes.
|
||||
alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
|
||||
```
|
||||
|
||||
2. Make a New Filesystem (**destroys contents, no questions asked**)
|
||||
2. Make a New Filesystem (**destroys contents**)
|
||||
|
||||
We specify that two of our three nodes must be present to form a
|
||||
quorum for the system to function.
|
||||
We specify quorum slots with the addresses of each of the quorum
|
||||
member nodes, the metadata device, and the data device.
|
||||
|
||||
```shell
|
||||
scoutfs mkfs -Q 2 /dev/meta_dev /dev/data_dev
|
||||
scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
|
||||
```
|
||||
|
||||
3. Mount the Filesystem
|
||||
|
||||
Each mounting node provides its local IP address on which it will run
|
||||
an internal server for the other mounts if it is elected the leader by
|
||||
the quorum.
|
||||
First, mount each of the quorum nodes so that they can elect and
|
||||
start a server for the remaining node to connect to. The slot numbers
|
||||
were specified with the leading "0,..." and "1,..." in the mkfs options
|
||||
above.
|
||||
|
||||
```shell
|
||||
mkdir /mnt/scoutfs
|
||||
mount -t scoutfs -o server_addr=$NODE_ADDR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
|
||||
mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
|
||||
```
|
||||
|
||||
Then mount the remaining node which can now connect to the running server.
|
||||
|
||||
```shell
|
||||
mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
|
||||
```
|
||||
|
||||
4. For Kicks, Observe the Metadata Change Index
|
||||
|
||||
@@ -16,11 +16,7 @@ SCOUTFS_GIT_DESCRIBE := \
|
||||
$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
|
||||
echo no-git)
|
||||
|
||||
SCOUTFS_FORMAT_HASH := \
|
||||
$(shell cat src/format.h src/ioctl.h | md5sum | cut -b1-16)
|
||||
|
||||
SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
|
||||
SCOUTFS_FORMAT_HASH=$(SCOUTFS_FORMAT_HASH) \
|
||||
CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
|
||||
EXTRA_CFLAGS="-Werror"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
obj-$(CONFIG_SCOUTFS_FS) := scoutfs.o
|
||||
|
||||
CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\" \
|
||||
-DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU
|
||||
CFLAGS_super.o = -DSCOUTFS_GIT_DESCRIBE=\"$(SCOUTFS_GIT_DESCRIBE)\"
|
||||
|
||||
CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
|
||||
|
||||
|
||||
232
kmod/src/alloc.c
232
kmod/src/alloc.c
@@ -252,7 +252,7 @@ void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
|
||||
{
|
||||
memset(alloc, 0, sizeof(struct scoutfs_alloc));
|
||||
|
||||
spin_lock_init(&alloc->lock);
|
||||
seqlock_init(&alloc->seqlock);
|
||||
mutex_init(&alloc->mutex);
|
||||
alloc->avail = *avail;
|
||||
alloc->freed = *freed;
|
||||
@@ -358,31 +358,24 @@ static void list_block_sort(struct scoutfs_alloc_list_block *lblk)
|
||||
|
||||
/*
|
||||
* We're always reading blocks that we own, so we shouldn't see stale
|
||||
* references. But the cached block can be stale and we can need to
|
||||
* invalidate it.
|
||||
* references but we could retry reads after dropping stale cached
|
||||
* blocks. If we do see a stale error then we've hit persistent
|
||||
* corruption.
|
||||
*/
|
||||
static int read_list_block(struct super_block *sb,
|
||||
struct scoutfs_alloc_list_ref *ref,
|
||||
static int read_list_block(struct super_block *sb, struct scoutfs_block_ref *ref,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_block *bl = NULL;
|
||||
int ret;
|
||||
|
||||
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
|
||||
if (!IS_ERR_OR_NULL(bl) &&
|
||||
!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
|
||||
SCOUTFS_BLOCK_MAGIC_ALLOC_LIST)) {
|
||||
scoutfs_inc_counter(sb, alloc_stale_cached_list_block);
|
||||
scoutfs_block_invalidate(sb, bl);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
|
||||
}
|
||||
if (IS_ERR(bl)) {
|
||||
*bl_ret = NULL;
|
||||
return PTR_ERR(bl);
|
||||
}
|
||||
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, bl_ret);
|
||||
if (ret < 0) {
|
||||
if (ret == -ESTALE) {
|
||||
scoutfs_inc_counter(sb, alloc_stale_list_block);
|
||||
ret = -EIO;
|
||||
}
|
||||
};
|
||||
|
||||
*bl_ret = bl;
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -396,86 +389,12 @@ static int read_list_block(struct super_block *sb,
|
||||
static int dirty_list_block(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_alloc_list_ref *ref,
|
||||
struct scoutfs_block_ref *ref,
|
||||
u64 dirty, u64 *old,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_block *cow_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
bool undo_alloc = false;
|
||||
u64 blkno;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
blkno = le64_to_cpu(ref->blkno);
|
||||
if (blkno) {
|
||||
ret = read_list_block(sb, ref, &bl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_block_writer_is_dirty(sb, bl)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (dirty == 0) {
|
||||
ret = scoutfs_alloc_meta(sb, alloc, wri, &dirty);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
undo_alloc = true;
|
||||
}
|
||||
|
||||
cow_bl = scoutfs_block_create(sb, dirty);
|
||||
if (IS_ERR(cow_bl)) {
|
||||
ret = PTR_ERR(cow_bl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (old) {
|
||||
*old = blkno;
|
||||
} else if (blkno) {
|
||||
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (bl)
|
||||
memcpy(cow_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
|
||||
else
|
||||
memset(cow_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = cow_bl;
|
||||
cow_bl = NULL;
|
||||
|
||||
lblk = bl->data;
|
||||
lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
|
||||
lblk->hdr.fsid = super->hdr.fsid;
|
||||
lblk->hdr.blkno = cpu_to_le64(bl->blkno);
|
||||
prandom_bytes(&lblk->hdr.seq, sizeof(lblk->hdr.seq));
|
||||
|
||||
ref->blkno = lblk->hdr.blkno;
|
||||
ref->seq = lblk->hdr.seq;
|
||||
|
||||
scoutfs_block_writer_mark_dirty(sb, wri, bl);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
scoutfs_block_put(sb, cow_bl);
|
||||
if (ret < 0 && undo_alloc) {
|
||||
err = scoutfs_free_meta(sb, alloc, wri, dirty);
|
||||
BUG_ON(err); /* inconsistent */
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
}
|
||||
*bl_ret = bl;
|
||||
|
||||
return ret;
|
||||
return scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST,
|
||||
bl_ret, dirty, old);
|
||||
}
|
||||
|
||||
/* Allocate a new dirty list block if we fill up more than 3/4 of the block. */
|
||||
@@ -497,7 +416,7 @@ static int dirty_alloc_blocks(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri)
|
||||
{
|
||||
struct scoutfs_alloc_list_ref orig_freed;
|
||||
struct scoutfs_block_ref orig_freed;
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_block *av_bl = NULL;
|
||||
struct scoutfs_block *fr_bl = NULL;
|
||||
@@ -607,7 +526,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
spin_lock(&alloc->lock);
|
||||
write_seqlock(&alloc->seqlock);
|
||||
|
||||
lblk = alloc->dirty_avail_bl->data;
|
||||
if (WARN_ON_ONCE(lblk->nr == 0)) {
|
||||
/* shouldn't happen, transaction should commit first */
|
||||
@@ -617,7 +537,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
list_block_remove(&alloc->avail, lblk, 1);
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock(&alloc->lock);
|
||||
|
||||
write_sequnlock(&alloc->seqlock);
|
||||
|
||||
out:
|
||||
if (ret < 0)
|
||||
@@ -640,7 +561,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
spin_lock(&alloc->lock);
|
||||
write_seqlock(&alloc->seqlock);
|
||||
|
||||
lblk = alloc->dirty_freed_bl->data;
|
||||
if (WARN_ON_ONCE(list_block_space(lblk->nr) == 0)) {
|
||||
/* shouldn't happen, transaction should commit first */
|
||||
@@ -649,7 +571,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
list_block_add(&alloc->freed, lblk, blkno);
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock(&alloc->lock);
|
||||
|
||||
write_sequnlock(&alloc->seqlock);
|
||||
|
||||
out:
|
||||
scoutfs_inc_counter(sb, alloc_free_meta);
|
||||
@@ -657,6 +580,60 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_dalloc_init(struct scoutfs_data_alloc *dalloc,
|
||||
struct scoutfs_alloc_root *data_avail)
|
||||
{
|
||||
dalloc->root = *data_avail;
|
||||
memset(&dalloc->cached, 0, sizeof(dalloc->cached));
|
||||
atomic64_set(&dalloc->total_len, le64_to_cpu(dalloc->root.total_len));
|
||||
}
|
||||
|
||||
void scoutfs_dalloc_get_root(struct scoutfs_data_alloc *dalloc,
|
||||
struct scoutfs_alloc_root *data_avail)
|
||||
{
|
||||
*data_avail = dalloc->root;
|
||||
}
|
||||
|
||||
static void dalloc_update_total_len(struct scoutfs_data_alloc *dalloc)
|
||||
{
|
||||
atomic64_set(&dalloc->total_len, le64_to_cpu(dalloc->root.total_len) +
|
||||
dalloc->cached.len);
|
||||
}
|
||||
|
||||
u64 scoutfs_dalloc_total_len(struct scoutfs_data_alloc *dalloc)
|
||||
{
|
||||
return atomic64_read(&dalloc->total_len);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the current in-memory cached free extent to extent items in
|
||||
* the avail root. This should be locked by the caller just like
|
||||
* _alloc_data and _free_data.
|
||||
*/
|
||||
int scoutfs_dalloc_return_cached(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_data_alloc *dalloc)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = &dalloc->root,
|
||||
.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
|
||||
};
|
||||
int ret = 0;
|
||||
|
||||
if (dalloc->cached.len) {
|
||||
ret = scoutfs_ext_insert(sb, &alloc_ext_ops, &args,
|
||||
dalloc->cached.start,
|
||||
dalloc->cached.len, 0, 0);
|
||||
if (ret == 0)
|
||||
memset(&dalloc->cached, 0, sizeof(dalloc->cached));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a data extent. An extent that's smaller than the requested
|
||||
* size can be returned.
|
||||
@@ -671,14 +648,13 @@ out:
|
||||
*/
|
||||
int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_alloc_root *root,
|
||||
struct scoutfs_extent *cached, u64 count,
|
||||
struct scoutfs_data_alloc *dalloc, u64 count,
|
||||
u64 *blkno_ret, u64 *count_ret)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.root = &dalloc->root,
|
||||
.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
|
||||
};
|
||||
struct scoutfs_extent ext;
|
||||
@@ -699,27 +675,35 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
}
|
||||
|
||||
/* smaller allocations come from a cached extent */
|
||||
if (cached->len == 0) {
|
||||
if (dalloc->cached.len == 0) {
|
||||
ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args, 0, 0,
|
||||
SCOUTFS_ALLOC_DATA_LG_THRESH, cached);
|
||||
SCOUTFS_ALLOC_DATA_LG_THRESH,
|
||||
&dalloc->cached);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
len = min(count, cached->len);
|
||||
len = min(count, dalloc->cached.len);
|
||||
|
||||
*blkno_ret = cached->start;
|
||||
*blkno_ret = dalloc->cached.start;
|
||||
*count_ret = len;
|
||||
|
||||
cached->start += len;
|
||||
cached->len -= len;
|
||||
dalloc->cached.start += len;
|
||||
dalloc->cached.len -= len;
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Special retval meaning there wasn't space to alloc from
|
||||
* this txn. Doesn't mean filesystem is completely full.
|
||||
* Maybe upper layers want to try again.
|
||||
*/
|
||||
if (ret == -ENOENT)
|
||||
ret = -ENOSPC;
|
||||
ret = -ENOBUFS;
|
||||
*blkno_ret = 0;
|
||||
*count_ret = 0;
|
||||
} else {
|
||||
dalloc_update_total_len(dalloc);
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, alloc_alloc_data);
|
||||
@@ -1045,7 +1029,7 @@ int scoutfs_alloc_splice_list(struct super_block *sb,
|
||||
struct scoutfs_alloc_list_head *src)
|
||||
{
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_alloc_list_ref *ref;
|
||||
struct scoutfs_block_ref *ref;
|
||||
struct scoutfs_block *prev = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
int ret = 0;
|
||||
@@ -1086,17 +1070,23 @@ out:
|
||||
|
||||
/*
|
||||
* Returns true if meta avail and free don't have room for the given
|
||||
* number of alloctions or frees.
|
||||
* number of allocations or frees. This is called at a significantly
|
||||
* higher frequency than allocations as writers try to enter
|
||||
* transactions. This is the only reader of the seqlock which gives
|
||||
* read-mostly sampling instead of bouncing a spinlock around all the
|
||||
* cores.
|
||||
*/
|
||||
bool scoutfs_alloc_meta_low(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc, u32 nr)
|
||||
{
|
||||
unsigned int seq;
|
||||
bool lo;
|
||||
|
||||
spin_lock(&alloc->lock);
|
||||
lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
|
||||
list_block_space(alloc->freed.first_nr) < nr;
|
||||
spin_unlock(&alloc->lock);
|
||||
do {
|
||||
seq = read_seqbegin(&alloc->seqlock);
|
||||
lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
|
||||
list_block_space(alloc->freed.first_nr) < nr;
|
||||
} while (read_seqretry(&alloc->seqlock, seq));
|
||||
|
||||
return lo;
|
||||
}
|
||||
@@ -1108,8 +1098,8 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
|
||||
int scoutfs_alloc_foreach(struct super_block *sb,
|
||||
scoutfs_alloc_foreach_cb_t cb, void *arg)
|
||||
{
|
||||
struct scoutfs_btree_ref stale_refs[2] = {{0,}};
|
||||
struct scoutfs_btree_ref refs[2] = {{0,}};
|
||||
struct scoutfs_block_ref stale_refs[2] = {{0,}};
|
||||
struct scoutfs_block_ref refs[2] = {{0,}};
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct scoutfs_srch_compact *sc;
|
||||
struct scoutfs_log_trees lt;
|
||||
|
||||
@@ -72,7 +72,8 @@
|
||||
* transaction.
|
||||
*/
|
||||
struct scoutfs_alloc {
|
||||
spinlock_t lock;
|
||||
/* writers rarely modify list_head avail/freed. readers often check for _meta_alloc_low */
|
||||
seqlock_t seqlock;
|
||||
struct mutex mutex;
|
||||
struct scoutfs_block *dirty_avail_bl;
|
||||
struct scoutfs_block *dirty_freed_bl;
|
||||
@@ -80,6 +81,18 @@ struct scoutfs_alloc {
|
||||
struct scoutfs_alloc_list_head freed;
|
||||
};
|
||||
|
||||
/*
|
||||
* A run-time data allocator. We have a cached extent in memory that is
|
||||
* a lot cheaper to work with than the extent items, and we have a
|
||||
* consistent record of the total_len that can be sampled outside of the
|
||||
* usual heavy serialization of the extent modifications.
|
||||
*/
|
||||
struct scoutfs_data_alloc {
|
||||
struct scoutfs_alloc_root root;
|
||||
struct scoutfs_extent cached;
|
||||
atomic64_t total_len;
|
||||
};
|
||||
|
||||
void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_alloc_list_head *avail,
|
||||
struct scoutfs_alloc_list_head *freed);
|
||||
@@ -92,10 +105,18 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, u64 blkno);
|
||||
|
||||
void scoutfs_dalloc_init(struct scoutfs_data_alloc *dalloc,
|
||||
struct scoutfs_alloc_root *data_avail);
|
||||
void scoutfs_dalloc_get_root(struct scoutfs_data_alloc *dalloc,
|
||||
struct scoutfs_alloc_root *data_avail);
|
||||
u64 scoutfs_dalloc_total_len(struct scoutfs_data_alloc *dalloc);
|
||||
int scoutfs_dalloc_return_cached(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_data_alloc *dalloc);
|
||||
int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_alloc_root *root,
|
||||
struct scoutfs_extent *cached, u64 count,
|
||||
struct scoutfs_data_alloc *dalloc, u64 count,
|
||||
u64 *blkno_ret, u64 *count_ret);
|
||||
int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
|
||||
764
kmod/src/block.c
764
kmod/src/block.c
File diff suppressed because it is too large
Load Diff
@@ -13,27 +13,16 @@ struct scoutfs_block {
|
||||
void *priv;
|
||||
};
|
||||
|
||||
__le32 scoutfs_block_calc_crc(struct scoutfs_block_header *hdr, u32 size);
|
||||
bool scoutfs_block_valid_crc(struct scoutfs_block_header *hdr, u32 size);
|
||||
bool scoutfs_block_valid_ref(struct super_block *sb,
|
||||
struct scoutfs_block_header *hdr,
|
||||
__le64 seq, __le64 blkno);
|
||||
|
||||
struct scoutfs_block *scoutfs_block_create(struct super_block *sb, u64 blkno);
|
||||
struct scoutfs_block *scoutfs_block_read(struct super_block *sb, u64 blkno);
|
||||
void scoutfs_block_invalidate(struct super_block *sb, struct scoutfs_block *bl);
|
||||
bool scoutfs_block_consistent_ref(struct super_block *sb,
|
||||
struct scoutfs_block *bl,
|
||||
__le64 seq, __le64 blkno, u32 magic);
|
||||
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
|
||||
struct scoutfs_block **bl_ret);
|
||||
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
|
||||
|
||||
void scoutfs_block_writer_init(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri);
|
||||
void scoutfs_block_writer_mark_dirty(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_block *bl);
|
||||
bool scoutfs_block_writer_is_dirty(struct super_block *sb,
|
||||
struct scoutfs_block *bl);
|
||||
int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, struct scoutfs_block_ref *ref,
|
||||
u32 magic, struct scoutfs_block **bl_ret,
|
||||
u64 dirty_blkno, u64 *ref_blkno);
|
||||
int scoutfs_block_writer_write(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri);
|
||||
void scoutfs_block_writer_forget_all(struct super_block *sb,
|
||||
|
||||
154
kmod/src/btree.c
154
kmod/src/btree.c
@@ -80,7 +80,7 @@ enum btree_walk_flags {
|
||||
BTW_NEXT = (1 << 0), /* return >= key */
|
||||
BTW_PREV = (1 << 1), /* return <= key */
|
||||
BTW_DIRTY = (1 << 2), /* cow stable blocks */
|
||||
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref */
|
||||
BTW_ALLOC = (1 << 3), /* allocate a new block for 0 ref, requires dirty */
|
||||
BTW_INSERT = (1 << 4), /* walking to insert, try splitting */
|
||||
BTW_DELETE = (1 << 5), /* walking to delete, try joining */
|
||||
};
|
||||
@@ -619,140 +619,36 @@ static void move_items(struct scoutfs_btree_block *dst,
|
||||
* This is used to lookup cached blocks, read blocks, cow blocks for
|
||||
* dirtying, and allocate new blocks.
|
||||
*
|
||||
* Btree blocks don't have rigid cache consistency. We can be following
|
||||
* block references into cached blocks that are now stale or can be
|
||||
* following a stale root into blocks that have been overwritten. If we
|
||||
* hit a block that looks stale we first invalidate the cache and retry,
|
||||
* returning -ESTALE if it still looks wrong. The caller can retry the
|
||||
* read from a more current root or decide that this is a persistent
|
||||
* error.
|
||||
* If we read a stale block we return stale so the caller can retry with
|
||||
* a newer root or return an error.
|
||||
*/
|
||||
static int get_ref_block(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, int flags,
|
||||
struct scoutfs_btree_ref *ref,
|
||||
struct scoutfs_block_ref *ref,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_btree_block *bt = NULL;
|
||||
struct scoutfs_btree_block *new;
|
||||
struct scoutfs_block *new_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
bool retried = false;
|
||||
u64 blkno;
|
||||
u64 seq;
|
||||
int ret;
|
||||
|
||||
/* always get the current block, either to return or cow from */
|
||||
if (ref && ref->blkno) {
|
||||
retry:
|
||||
if (WARN_ON_ONCE((flags & BTW_ALLOC) && !(flags & BTW_DIRTY)))
|
||||
return -EINVAL;
|
||||
|
||||
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
|
||||
if (IS_ERR(bl)) {
|
||||
trace_scoutfs_btree_read_error(sb, ref);
|
||||
scoutfs_inc_counter(sb, btree_read_error);
|
||||
ret = PTR_ERR(bl);
|
||||
goto out;
|
||||
}
|
||||
bt = (void *)bl->data;
|
||||
|
||||
if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
|
||||
SCOUTFS_BLOCK_MAGIC_BTREE) ||
|
||||
scoutfs_trigger(sb, BTREE_STALE_READ)) {
|
||||
|
||||
scoutfs_inc_counter(sb, btree_stale_read);
|
||||
|
||||
scoutfs_block_invalidate(sb, bl);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
|
||||
if (!retried) {
|
||||
retried = true;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to create a new dirty copy of the block if
|
||||
* the caller asked for it. If the block is already
|
||||
* dirty then we can return it.
|
||||
*/
|
||||
if (!(flags & BTW_DIRTY) ||
|
||||
scoutfs_block_writer_is_dirty(sb, bl)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
} else if (!(flags & BTW_ALLOC)) {
|
||||
if (ref->blkno == 0 && !(flags & BTW_ALLOC)) {
|
||||
ret = -ENOENT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
prandom_bytes(&seq, sizeof(seq));
|
||||
|
||||
new_bl = scoutfs_block_create(sb, blkno);
|
||||
if (IS_ERR(new_bl)) {
|
||||
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
|
||||
BUG_ON(ret);
|
||||
ret = PTR_ERR(new_bl);
|
||||
goto out;
|
||||
}
|
||||
new = (void *)new_bl->data;
|
||||
|
||||
/* free old stable blkno we're about to overwrite */
|
||||
if (ref && ref->blkno) {
|
||||
ret = scoutfs_free_meta(sb, alloc, wri,
|
||||
le64_to_cpu(ref->blkno));
|
||||
if (ret) {
|
||||
ret = scoutfs_free_meta(sb, alloc, wri, blkno);
|
||||
BUG_ON(ret);
|
||||
scoutfs_block_put(sb, new_bl);
|
||||
new_bl = NULL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
scoutfs_block_writer_mark_dirty(sb, wri, new_bl);
|
||||
|
||||
trace_scoutfs_btree_dirty_block(sb, blkno, seq,
|
||||
bt ? le64_to_cpu(bt->hdr.blkno) : 0,
|
||||
bt ? le64_to_cpu(bt->hdr.seq) : 0);
|
||||
|
||||
if (bt) {
|
||||
/* returning a cow of an existing block */
|
||||
memcpy(new, bt, SCOUTFS_BLOCK_LG_SIZE);
|
||||
scoutfs_block_put(sb, bl);
|
||||
} else {
|
||||
/* returning a newly allocated block */
|
||||
memset(new, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
new->hdr.fsid = super->hdr.fsid;
|
||||
}
|
||||
bl = new_bl;
|
||||
bt = new;
|
||||
|
||||
bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
|
||||
bt->hdr.blkno = cpu_to_le64(blkno);
|
||||
bt->hdr.seq = cpu_to_le64(seq);
|
||||
if (ref) {
|
||||
ref->blkno = bt->hdr.blkno;
|
||||
ref->seq = bt->hdr.seq;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
if (flags & BTW_DIRTY)
|
||||
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, SCOUTFS_BLOCK_MAGIC_BTREE,
|
||||
bl_ret, 0, NULL);
|
||||
else
|
||||
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BTREE, bl_ret);
|
||||
out:
|
||||
if (ret) {
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
if (ret < 0) {
|
||||
if (ret == -ESTALE)
|
||||
scoutfs_inc_counter(sb, btree_stale_read);
|
||||
}
|
||||
|
||||
*bl_ret = bl;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -766,7 +662,7 @@ static void create_parent_item(struct scoutfs_btree_block *parent,
|
||||
{
|
||||
struct scoutfs_avl_node *par;
|
||||
int cmp;
|
||||
struct scoutfs_btree_ref ref = {
|
||||
struct scoutfs_block_ref ref = {
|
||||
.blkno = child->hdr.blkno,
|
||||
.seq = child->hdr.seq,
|
||||
};
|
||||
@@ -784,7 +680,7 @@ static void update_parent_item(struct scoutfs_btree_block *parent,
|
||||
struct scoutfs_btree_item *par_item,
|
||||
struct scoutfs_btree_block *child)
|
||||
{
|
||||
struct scoutfs_btree_ref *ref = item_val(parent, par_item);
|
||||
struct scoutfs_block_ref *ref = item_val(parent, par_item);
|
||||
|
||||
par_item->key = *item_key(last_item(child));
|
||||
ref->blkno = child->hdr.blkno;
|
||||
@@ -832,12 +728,13 @@ static int try_split(struct super_block *sb,
|
||||
struct scoutfs_block *par_bl = NULL;
|
||||
struct scoutfs_btree_block *left;
|
||||
struct scoutfs_key max_key;
|
||||
struct scoutfs_block_ref zeros;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
/* parents need to leave room for child references */
|
||||
if (right->level)
|
||||
val_len = sizeof(struct scoutfs_btree_ref);
|
||||
val_len = sizeof(struct scoutfs_block_ref);
|
||||
|
||||
/* don't need to split if there's enough space for the item */
|
||||
if (mid_free_item_room(right, val_len))
|
||||
@@ -849,7 +746,8 @@ static int try_split(struct super_block *sb,
|
||||
scoutfs_inc_counter(sb, btree_split);
|
||||
|
||||
/* alloc split neighbour first to avoid unwinding tree growth */
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &left_bl);
|
||||
memset(&zeros, 0, sizeof(zeros));
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &left_bl);
|
||||
if (ret)
|
||||
return ret;
|
||||
left = left_bl->data;
|
||||
@@ -857,7 +755,8 @@ static int try_split(struct super_block *sb,
|
||||
init_btree_block(left, right->level);
|
||||
|
||||
if (!parent) {
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC, NULL, &par_bl);
|
||||
memset(&zeros, 0, sizeof(zeros));
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &zeros, &par_bl);
|
||||
if (ret) {
|
||||
err = scoutfs_free_meta(sb, alloc, wri,
|
||||
le64_to_cpu(left->hdr.blkno));
|
||||
@@ -905,7 +804,7 @@ static int try_join(struct super_block *sb,
|
||||
struct scoutfs_btree_item *sib_par_item;
|
||||
struct scoutfs_btree_block *sib;
|
||||
struct scoutfs_block *sib_bl;
|
||||
struct scoutfs_btree_ref *ref;
|
||||
struct scoutfs_block_ref *ref;
|
||||
unsigned int sib_tot;
|
||||
bool move_right;
|
||||
int to_move;
|
||||
@@ -1194,7 +1093,7 @@ static int btree_walk(struct super_block *sb,
|
||||
struct scoutfs_btree_item *prev;
|
||||
struct scoutfs_avl_node *next_node;
|
||||
struct scoutfs_avl_node *node;
|
||||
struct scoutfs_btree_ref *ref;
|
||||
struct scoutfs_block_ref *ref;
|
||||
unsigned int level;
|
||||
unsigned int nr;
|
||||
int ret;
|
||||
@@ -1225,8 +1124,7 @@ restart:
|
||||
if (!(flags & BTW_INSERT)) {
|
||||
ret = -ENOENT;
|
||||
} else {
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC,
|
||||
&root->ref, &bl);
|
||||
ret = get_ref_block(sb, alloc, wri, BTW_ALLOC | BTW_DIRTY, &root->ref, &bl);
|
||||
if (ret == 0) {
|
||||
bt = bl->data;
|
||||
init_btree_block(bt, 0);
|
||||
|
||||
@@ -34,13 +34,10 @@
|
||||
|
||||
/*
|
||||
* The client is responsible for maintaining a connection to the server.
|
||||
* This includes managing quorum elections that determine which client
|
||||
* should run the server that all the clients connect to.
|
||||
*/
|
||||
|
||||
#define CLIENT_CONNECT_DELAY_MS (MSEC_PER_SEC / 10)
|
||||
#define CLIENT_CONNECT_TIMEOUT_MS (1 * MSEC_PER_SEC)
|
||||
#define CLIENT_QUORUM_TIMEOUT_MS (5 * MSEC_PER_SEC)
|
||||
|
||||
struct client_info {
|
||||
struct super_block *sb;
|
||||
@@ -52,7 +49,6 @@ struct client_info {
|
||||
struct delayed_work connect_dwork;
|
||||
|
||||
u64 server_term;
|
||||
u64 greeting_umb;
|
||||
|
||||
bool sending_farewell;
|
||||
int farewell_error;
|
||||
@@ -121,16 +117,14 @@ int scoutfs_client_get_roots(struct super_block *sb,
|
||||
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
__le64 before = cpu_to_le64p(seq);
|
||||
__le64 after;
|
||||
__le64 leseq;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_ADVANCE_SEQ,
|
||||
&before, sizeof(before),
|
||||
&after, sizeof(after));
|
||||
NULL, 0, &leseq, sizeof(leseq));
|
||||
if (ret == 0)
|
||||
*seq = le64_to_cpu(after);
|
||||
*seq = le64_to_cpu(leseq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -156,7 +150,7 @@ static int client_lock_response(struct super_block *sb,
|
||||
void *resp, unsigned int resp_len,
|
||||
int error, void *data)
|
||||
{
|
||||
if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
|
||||
if (resp_len != sizeof(struct scoutfs_net_lock))
|
||||
return -EINVAL;
|
||||
|
||||
/* XXX error? */
|
||||
@@ -282,10 +276,10 @@ static int client_greeting(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gr->format_hash != super->format_hash) {
|
||||
if (gr->version != super->version) {
|
||||
scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
|
||||
le64_to_cpu(gr->format_hash),
|
||||
le64_to_cpu(super->format_hash));
|
||||
le64_to_cpu(gr->version),
|
||||
le64_to_cpu(super->version));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -294,52 +288,30 @@ static int client_greeting(struct super_block *sb,
|
||||
scoutfs_net_client_greeting(sb, conn, new_server);
|
||||
|
||||
client->server_term = le64_to_cpu(gr->server_term);
|
||||
client->greeting_umb = le64_to_cpu(gr->unmount_barrier);
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work is responsible for maintaining a connection from the client
|
||||
* to the server. It's queued on mount and disconnect and we requeue
|
||||
* the work if the work fails and we're not shutting down.
|
||||
* The client is deciding if it needs to keep trying to reconnect to
|
||||
* have its farewell request processed. The server removes our mounted
|
||||
* client item last so that if we don't see it we know the server has
|
||||
* processed our farewell and we don't need to reconnect, we can unmount
|
||||
* safely.
|
||||
*
|
||||
* In the typical case a mount reads the super blocks and finds the
|
||||
* address of the currently running server and connects to it.
|
||||
* Non-voting clients who can't connect will keep trying alternating
|
||||
* reading the address and getting connect timeouts.
|
||||
*
|
||||
* Voting mounts will try to elect a leader if they can't connect to the
|
||||
* server. When a quorum can't connect and are able to elect a leader
|
||||
* then a new server is started. The new server will write its address
|
||||
* in the super and everyone will be able to connect.
|
||||
*
|
||||
* There's a tricky bit of coordination required to safely unmount.
|
||||
* Clients need to tell the server that they won't be coming back with a
|
||||
* farewell request. Once a client receives its farewell response it
|
||||
* can exit. But a majority of clients need to stick around to elect a
|
||||
* server to process all their farewell requests. This is coordinated
|
||||
* by having the greeting tell the server that a client is a voter. The
|
||||
* server then holds on to farewell requests from voters until only
|
||||
* requests from the final quorum remain. These farewell responses are
|
||||
* only sent after updating an unmount barrier in the super to indicate
|
||||
* to the final quorum that they can safely exit without having received
|
||||
* a farewell response over the network.
|
||||
* This is peeking at btree blocks that the server could be actively
|
||||
* freeing with cow updates so it can see stale blocks, we just return
|
||||
* the error and we'll retry eventually as the connection times out.
|
||||
*/
|
||||
static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
|
||||
{
|
||||
struct client_info *client = container_of(work, struct client_info,
|
||||
connect_dwork.work);
|
||||
struct super_block *sb = client->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
const bool am_voter = opts->server_addr.sin_addr.s_addr != 0;
|
||||
struct scoutfs_net_greeting greet;
|
||||
struct sockaddr_in sin;
|
||||
ktime_t timeout_abs;
|
||||
u64 elected_term;
|
||||
struct scoutfs_key key = {
|
||||
.sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
|
||||
.skmc_rid = cpu_to_le64(rid),
|
||||
};
|
||||
struct scoutfs_super_block *super;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
int ret;
|
||||
|
||||
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
|
||||
@@ -352,57 +324,77 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* can safely unmount if we see that server processed our farewell */
|
||||
if (am_voter && client->sending_farewell &&
|
||||
(le64_to_cpu(super->unmount_barrier) > client->greeting_umb)) {
|
||||
ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key, &iref);
|
||||
if (ret == 0) {
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
ret = 1;
|
||||
}
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
|
||||
kfree(super);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work is responsible for maintaining a connection from the client
|
||||
* to the server. It's queued on mount and disconnect and we requeue
|
||||
* the work if the work fails and we're not shutting down.
|
||||
*
|
||||
* We ask quorum for an address to try and connect to. If there isn't
|
||||
* one, or it fails, we back off a bit before trying again.
|
||||
*
|
||||
* There's a tricky bit of coordination required to safely unmount.
|
||||
* Clients need to tell the server that they won't be coming back with a
|
||||
* farewell request. Once the server processes a farewell request from
|
||||
* the client it can forget the client. If the connection is broken
|
||||
* before the client gets the farewell response it doesn't want to
|
||||
* reconnect to send it again.. instead the client can read the metadata
|
||||
* device to check for the lack of an item which indicates that the
|
||||
* server has processed its farewell.
|
||||
*/
|
||||
static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
{
|
||||
struct client_info *client = container_of(work, struct client_info,
|
||||
connect_dwork.work);
|
||||
struct super_block *sb = client->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
const bool am_quorum = opts->quorum_slot_nr >= 0;
|
||||
struct scoutfs_net_greeting greet;
|
||||
struct sockaddr_in sin;
|
||||
int ret;
|
||||
|
||||
/* can unmount once server farewell handling removes our item */
|
||||
if (client->sending_farewell &&
|
||||
lookup_mounted_client_item(sb, sbi->rid) == 0) {
|
||||
client->farewell_error = 0;
|
||||
complete(&client->farewell_comp);
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* try to connect to the super's server address */
|
||||
scoutfs_addr_to_sin(&sin, &super->server_addr);
|
||||
if (sin.sin_addr.s_addr != 0 && sin.sin_port != 0)
|
||||
ret = scoutfs_net_connect(sb, client->conn, &sin,
|
||||
CLIENT_CONNECT_TIMEOUT_MS);
|
||||
else
|
||||
ret = -ENOTCONN;
|
||||
|
||||
/* voters try to elect a leader if they couldn't connect */
|
||||
if (ret < 0) {
|
||||
/* non-voters will keep retrying */
|
||||
if (!am_voter)
|
||||
goto out;
|
||||
|
||||
/* make sure local server isn't writing super during votes */
|
||||
scoutfs_server_stop(sb);
|
||||
|
||||
timeout_abs = ktime_add_ms(ktime_get(),
|
||||
CLIENT_QUORUM_TIMEOUT_MS);
|
||||
|
||||
ret = scoutfs_quorum_election(sb, timeout_abs,
|
||||
le64_to_cpu(super->quorum_server_term),
|
||||
&elected_term);
|
||||
/* start the server if we were asked to */
|
||||
if (elected_term > 0)
|
||||
ret = scoutfs_server_start(sb, &opts->server_addr,
|
||||
elected_term);
|
||||
ret = -ENOTCONN;
|
||||
ret = scoutfs_quorum_server_sin(sb, &sin);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_net_connect(sb, client->conn, &sin,
|
||||
CLIENT_CONNECT_TIMEOUT_MS);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* send a greeting to verify endpoints of each connection */
|
||||
greet.fsid = super->hdr.fsid;
|
||||
greet.format_hash = super->format_hash;
|
||||
greet.version = super->version;
|
||||
greet.server_term = cpu_to_le64(client->server_term);
|
||||
greet.unmount_barrier = cpu_to_le64(client->greeting_umb);
|
||||
greet.rid = cpu_to_le64(sbi->rid);
|
||||
greet.flags = 0;
|
||||
if (client->sending_farewell)
|
||||
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_FAREWELL);
|
||||
if (am_voter)
|
||||
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_VOTER);
|
||||
if (am_quorum)
|
||||
greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_QUORUM);
|
||||
|
||||
ret = scoutfs_net_submit_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_GREETING,
|
||||
@@ -411,7 +403,6 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
scoutfs_net_shutdown(sb, client->conn);
|
||||
out:
|
||||
kfree(super);
|
||||
|
||||
/* always have a small delay before retrying to avoid storms */
|
||||
if (ret && !atomic_read(&client->shutting_down))
|
||||
|
||||
315
kmod/src/count.h
315
kmod/src/count.h
@@ -1,315 +0,0 @@
|
||||
#ifndef _SCOUTFS_COUNT_H_
|
||||
#define _SCOUTFS_COUNT_H_
|
||||
|
||||
/*
|
||||
* Our estimate of the space consumed while dirtying items is based on
|
||||
* the number of items and the size of their values.
|
||||
*
|
||||
* The estimate is still a read-only input to entering the transaction.
|
||||
* We'd like to use it as a clean rhs arg to hold_trans. We define SIC_
|
||||
* functions which return the count struct. This lets us have a single
|
||||
* arg and avoid bugs in initializing and passing in struct pointers
|
||||
* from callers. The internal __count functions are used compose an
|
||||
* estimate out of the sets of items it manipulates. We program in much
|
||||
* clearer C instead of in the preprocessor.
|
||||
*
|
||||
* Compilers are able to collapse the inlines into constants for the
|
||||
* constant estimates.
|
||||
*/
|
||||
|
||||
struct scoutfs_item_count {
|
||||
signed items;
|
||||
signed vals;
|
||||
};
|
||||
|
||||
/* The caller knows exactly what they're doing. */
|
||||
static inline const struct scoutfs_item_count SIC_EXACT(signed items,
|
||||
signed vals)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {
|
||||
.items = items,
|
||||
.vals = vals,
|
||||
};
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocating an inode creates a new set of indexed items.
|
||||
*/
|
||||
static inline void __count_alloc_inode(struct scoutfs_item_count *cnt)
|
||||
{
|
||||
const int nr_indices = SCOUTFS_INODE_INDEX_NR;
|
||||
|
||||
cnt->items += 1 + nr_indices;
|
||||
cnt->vals += sizeof(struct scoutfs_inode);
|
||||
}
|
||||
|
||||
/*
|
||||
* Dirtying an inode dirties the inode item and can delete and create
|
||||
* the full set of indexed items.
|
||||
*/
|
||||
static inline void __count_dirty_inode(struct scoutfs_item_count *cnt)
|
||||
{
|
||||
const int nr_indices = 2 * SCOUTFS_INODE_INDEX_NR;
|
||||
|
||||
cnt->items += 1 + nr_indices;
|
||||
cnt->vals += sizeof(struct scoutfs_inode);
|
||||
}
|
||||
|
||||
static inline const struct scoutfs_item_count SIC_ALLOC_INODE(void)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_alloc_inode(&cnt);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static inline const struct scoutfs_item_count SIC_DIRTY_INODE(void)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Directory entries are stored in three items.
|
||||
*/
|
||||
static inline void __count_dirents(struct scoutfs_item_count *cnt,
|
||||
unsigned name_len)
|
||||
{
|
||||
cnt->items += 3;
|
||||
cnt->vals += 3 * offsetof(struct scoutfs_dirent, name[name_len]);
|
||||
}
|
||||
|
||||
static inline void __count_sym_target(struct scoutfs_item_count *cnt,
|
||||
unsigned size)
|
||||
{
|
||||
unsigned nr = DIV_ROUND_UP(size, SCOUTFS_MAX_VAL_SIZE);
|
||||
|
||||
cnt->items += nr;
|
||||
cnt->vals += size;
|
||||
}
|
||||
|
||||
static inline void __count_orphan(struct scoutfs_item_count *cnt)
|
||||
{
|
||||
|
||||
cnt->items += 1;
|
||||
}
|
||||
|
||||
static inline void __count_mknod(struct scoutfs_item_count *cnt,
|
||||
unsigned name_len)
|
||||
{
|
||||
__count_alloc_inode(cnt);
|
||||
__count_dirents(cnt, name_len);
|
||||
__count_dirty_inode(cnt);
|
||||
}
|
||||
|
||||
static inline const struct scoutfs_item_count SIC_MKNOD(unsigned name_len)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_mknod(&cnt, name_len);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dropping the inode deletes all its items. Potentially enormous numbers
|
||||
* of items (data mapping, xattrs) are deleted in their own transactions.
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_DROP_INODE(int mode,
|
||||
u64 size)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
if (S_ISLNK(mode))
|
||||
__count_sym_target(&cnt, size);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_orphan(&cnt);
|
||||
|
||||
cnt.vals = 0;
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static inline const struct scoutfs_item_count SIC_LINK(unsigned name_len)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_dirents(&cnt, name_len);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlink can add orphan items.
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_UNLINK(unsigned name_len)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_dirents(&cnt, name_len);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_orphan(&cnt);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static inline const struct scoutfs_item_count SIC_SYMLINK(unsigned name_len,
|
||||
unsigned size)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_mknod(&cnt, name_len);
|
||||
__count_sym_target(&cnt, size);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* This assumes the worst case of a rename between directories that
|
||||
* unlinks an existing target. That'll be worse than the common case
|
||||
* by a few hundred bytes.
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_RENAME(unsigned old_len,
|
||||
unsigned new_len)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
/* dirty dirs and inodes */
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_dirty_inode(&cnt);
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
/* unlink old and new, link new */
|
||||
__count_dirents(&cnt, old_len);
|
||||
__count_dirents(&cnt, new_len);
|
||||
__count_dirents(&cnt, new_len);
|
||||
|
||||
/* orphan the existing target */
|
||||
__count_orphan(&cnt);
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Creating an xattr results in a dirty set of items with values that
|
||||
* store the xattr header, name, and value. There's always at least one
|
||||
* item with the header and name. Any previously existing items are
|
||||
* deleted which dirties their key but removes their value. The two
|
||||
* sets of items are indexed by different ids so their items don't
|
||||
* overlap.
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_XATTR_SET(unsigned old_parts,
|
||||
bool creating,
|
||||
unsigned name_len,
|
||||
unsigned size)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
unsigned int new_parts;
|
||||
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
if (old_parts)
|
||||
cnt.items += old_parts;
|
||||
|
||||
if (creating) {
|
||||
new_parts = SCOUTFS_XATTR_NR_PARTS(name_len, size);
|
||||
|
||||
cnt.items += new_parts;
|
||||
cnt.vals += sizeof(struct scoutfs_xattr) + name_len + size;
|
||||
}
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* write_begin can have to allocate all the blocks in the page and can
|
||||
* have to add a big allocation from the server to do so:
|
||||
* - merge added free extents from the server
|
||||
* - remove a free extent per block
|
||||
* - remove an offline extent for every other block
|
||||
* - add a file extent per block
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_WRITE_BEGIN(void)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
unsigned nr_free = (1 + SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
|
||||
unsigned nr_file = (DIV_ROUND_UP(SCOUTFS_BLOCK_SM_PER_PAGE, 2) +
|
||||
SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
|
||||
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
cnt.items += nr_free + nr_file;
|
||||
cnt.vals += nr_file;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Truncating an extent can:
|
||||
* - delete existing file extent,
|
||||
* - create two surrounding file extents,
|
||||
* - add an offline file extent,
|
||||
* - delete two existing free extents
|
||||
* - create a merged free extent
|
||||
*/
|
||||
static inline const struct scoutfs_item_count
|
||||
SIC_TRUNC_EXTENT(struct inode *inode)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
unsigned int nr_file = 1 + 2 + 1;
|
||||
unsigned int nr_free = (2 + 1) * 2;
|
||||
|
||||
if (inode)
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
cnt.items += nr_file + nr_free;
|
||||
cnt.vals += nr_file;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallocating an extent can, at most:
|
||||
* - allocate from the server: delete two free and insert merged
|
||||
* - free an allocated extent: delete one and create two split
|
||||
* - remove an unallocated file extent: delete one and create two split
|
||||
* - add an fallocated flie extent: delete two and inset one merged
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_FALLOCATE_ONE(void)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
unsigned int nr_free = ((1 + 2) * 2) * 2;
|
||||
unsigned int nr_file = (1 + 2) * 2;
|
||||
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
cnt.items += nr_free + nr_file;
|
||||
cnt.vals += nr_file;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* ioc_setattr_more can dirty the inode and add a single offline extent.
|
||||
*/
|
||||
static inline const struct scoutfs_item_count SIC_SETATTR_MORE(void)
|
||||
{
|
||||
struct scoutfs_item_count cnt = {0,};
|
||||
|
||||
__count_dirty_inode(&cnt);
|
||||
|
||||
cnt.items++;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -20,17 +20,21 @@
|
||||
EXPAND_COUNTER(alloc_list_freed_hi) \
|
||||
EXPAND_COUNTER(alloc_move) \
|
||||
EXPAND_COUNTER(alloc_moved_extent) \
|
||||
EXPAND_COUNTER(alloc_stale_cached_list_block) \
|
||||
EXPAND_COUNTER(block_cache_access) \
|
||||
EXPAND_COUNTER(alloc_stale_list_block) \
|
||||
EXPAND_COUNTER(block_cache_access_update) \
|
||||
EXPAND_COUNTER(block_cache_alloc_failure) \
|
||||
EXPAND_COUNTER(block_cache_alloc_page_order) \
|
||||
EXPAND_COUNTER(block_cache_alloc_virt) \
|
||||
EXPAND_COUNTER(block_cache_end_io_error) \
|
||||
EXPAND_COUNTER(block_cache_forget) \
|
||||
EXPAND_COUNTER(block_cache_free) \
|
||||
EXPAND_COUNTER(block_cache_invalidate) \
|
||||
EXPAND_COUNTER(block_cache_lru_move) \
|
||||
EXPAND_COUNTER(block_cache_free_work) \
|
||||
EXPAND_COUNTER(block_cache_remove_stale) \
|
||||
EXPAND_COUNTER(block_cache_shrink) \
|
||||
EXPAND_COUNTER(block_cache_shrink_next) \
|
||||
EXPAND_COUNTER(block_cache_shrink_recent) \
|
||||
EXPAND_COUNTER(block_cache_shrink_remove) \
|
||||
EXPAND_COUNTER(block_cache_shrink_restart) \
|
||||
EXPAND_COUNTER(btree_compact_values) \
|
||||
EXPAND_COUNTER(btree_compact_values_enomem) \
|
||||
EXPAND_COUNTER(btree_delete) \
|
||||
@@ -42,7 +46,6 @@
|
||||
EXPAND_COUNTER(btree_lookup) \
|
||||
EXPAND_COUNTER(btree_next) \
|
||||
EXPAND_COUNTER(btree_prev) \
|
||||
EXPAND_COUNTER(btree_read_error) \
|
||||
EXPAND_COUNTER(btree_split) \
|
||||
EXPAND_COUNTER(btree_stale_read) \
|
||||
EXPAND_COUNTER(btree_update) \
|
||||
@@ -58,6 +61,8 @@
|
||||
EXPAND_COUNTER(corrupt_symlink_inode_size) \
|
||||
EXPAND_COUNTER(corrupt_symlink_missing_item) \
|
||||
EXPAND_COUNTER(corrupt_symlink_not_null_term) \
|
||||
EXPAND_COUNTER(data_fallocate_enobufs_retry) \
|
||||
EXPAND_COUNTER(data_write_begin_enobufs_retry) \
|
||||
EXPAND_COUNTER(dentry_revalidate_error) \
|
||||
EXPAND_COUNTER(dentry_revalidate_invalid) \
|
||||
EXPAND_COUNTER(dentry_revalidate_locked) \
|
||||
@@ -71,6 +76,7 @@
|
||||
EXPAND_COUNTER(ext_op_remove) \
|
||||
EXPAND_COUNTER(forest_bloom_fail) \
|
||||
EXPAND_COUNTER(forest_bloom_pass) \
|
||||
EXPAND_COUNTER(forest_bloom_stale) \
|
||||
EXPAND_COUNTER(forest_read_items) \
|
||||
EXPAND_COUNTER(forest_roots_next_hint) \
|
||||
EXPAND_COUNTER(forest_set_bloom_bits) \
|
||||
@@ -137,18 +143,21 @@
|
||||
EXPAND_COUNTER(net_recv_invalid_message) \
|
||||
EXPAND_COUNTER(net_recv_messages) \
|
||||
EXPAND_COUNTER(net_unknown_request) \
|
||||
EXPAND_COUNTER(quorum_cycle) \
|
||||
EXPAND_COUNTER(quorum_elected_leader) \
|
||||
EXPAND_COUNTER(quorum_election_timeout) \
|
||||
EXPAND_COUNTER(quorum_failure) \
|
||||
EXPAND_COUNTER(quorum_read_block) \
|
||||
EXPAND_COUNTER(quorum_read_block_error) \
|
||||
EXPAND_COUNTER(quorum_elected) \
|
||||
EXPAND_COUNTER(quorum_fence_error) \
|
||||
EXPAND_COUNTER(quorum_fence_leader) \
|
||||
EXPAND_COUNTER(quorum_read_invalid_block) \
|
||||
EXPAND_COUNTER(quorum_saw_super_leader) \
|
||||
EXPAND_COUNTER(quorum_timedout) \
|
||||
EXPAND_COUNTER(quorum_write_block) \
|
||||
EXPAND_COUNTER(quorum_write_block_error) \
|
||||
EXPAND_COUNTER(quorum_fenced) \
|
||||
EXPAND_COUNTER(quorum_recv_error) \
|
||||
EXPAND_COUNTER(quorum_recv_heartbeat) \
|
||||
EXPAND_COUNTER(quorum_recv_invalid) \
|
||||
EXPAND_COUNTER(quorum_recv_resignation) \
|
||||
EXPAND_COUNTER(quorum_recv_vote) \
|
||||
EXPAND_COUNTER(quorum_send_heartbeat) \
|
||||
EXPAND_COUNTER(quorum_send_resignation) \
|
||||
EXPAND_COUNTER(quorum_send_request) \
|
||||
EXPAND_COUNTER(quorum_send_vote) \
|
||||
EXPAND_COUNTER(quorum_server_shutdown) \
|
||||
EXPAND_COUNTER(quorum_term_follower) \
|
||||
EXPAND_COUNTER(server_commit_hold) \
|
||||
EXPAND_COUNTER(server_commit_queue) \
|
||||
EXPAND_COUNTER(server_commit_worker) \
|
||||
@@ -158,7 +167,6 @@
|
||||
EXPAND_COUNTER(srch_compact_flush) \
|
||||
EXPAND_COUNTER(srch_compact_log_page) \
|
||||
EXPAND_COUNTER(srch_compact_removed_entry) \
|
||||
EXPAND_COUNTER(srch_inconsistent_ref) \
|
||||
EXPAND_COUNTER(srch_rotate_log) \
|
||||
EXPAND_COUNTER(srch_search_log) \
|
||||
EXPAND_COUNTER(srch_search_log_block) \
|
||||
|
||||
438
kmod/src/data.c
438
kmod/src/data.c
@@ -37,8 +37,8 @@
|
||||
#include "lock.h"
|
||||
#include "file.h"
|
||||
#include "msg.h"
|
||||
#include "count.h"
|
||||
#include "ext.h"
|
||||
#include "util.h"
|
||||
|
||||
/*
|
||||
* We want to amortize work done after dirtying the shared transaction
|
||||
@@ -53,9 +53,8 @@ struct data_info {
|
||||
struct mutex mutex;
|
||||
struct scoutfs_alloc *alloc;
|
||||
struct scoutfs_block_writer *wri;
|
||||
struct scoutfs_alloc_root data_avail;
|
||||
struct scoutfs_alloc_root data_freed;
|
||||
struct scoutfs_extent cached_ext;
|
||||
struct scoutfs_data_alloc dalloc;
|
||||
};
|
||||
|
||||
#define DECLARE_DATA_INFO(sb, name) \
|
||||
@@ -93,6 +92,16 @@ static void ext_from_item(struct scoutfs_extent *ext,
|
||||
ext->flags = dv->flags;
|
||||
}
|
||||
|
||||
static void data_ext_op_warn(struct inode *inode)
|
||||
{
|
||||
struct scoutfs_inode_info *si;
|
||||
|
||||
if (inode) {
|
||||
si = SCOUTFS_I(inode);
|
||||
WARN_ON_ONCE(!rwsem_is_locked(&si->extent_sem));
|
||||
}
|
||||
}
|
||||
|
||||
static int data_ext_next(struct super_block *sb, void *arg, u64 start, u64 len,
|
||||
struct scoutfs_extent *ext)
|
||||
{
|
||||
@@ -102,6 +111,8 @@ static int data_ext_next(struct super_block *sb, void *arg, u64 start, u64 len,
|
||||
struct scoutfs_key last;
|
||||
int ret;
|
||||
|
||||
data_ext_op_warn(args->inode);
|
||||
|
||||
item_from_extent(&last, &dv, args->ino, U64_MAX, 1, 0, 0);
|
||||
item_from_extent(&key, &dv, args->ino, start, len, 0, 0);
|
||||
|
||||
@@ -139,6 +150,8 @@ static int data_ext_insert(struct super_block *sb, void *arg, u64 start,
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
data_ext_op_warn(args->inode);
|
||||
|
||||
item_from_extent(&key, &dv, args->ino, start, len, map, flags);
|
||||
ret = scoutfs_item_create(sb, &key, &dv, sizeof(dv), args->lock);
|
||||
if (ret == 0 && args->inode)
|
||||
@@ -154,6 +167,8 @@ static int data_ext_remove(struct super_block *sb, void *arg, u64 start,
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
data_ext_op_warn(args->inode);
|
||||
|
||||
item_from_extent(&key, &dv, args->ino, start, len, map, flags);
|
||||
ret = scoutfs_item_delete(sb, &key, args->lock);
|
||||
if (ret == 0 && args->inode)
|
||||
@@ -275,7 +290,7 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
|
||||
u64 ino, u64 iblock, u64 last, bool offline,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_item_count cnt = SIC_TRUNC_EXTENT(inode);
|
||||
struct scoutfs_inode_info *si = NULL;
|
||||
LIST_HEAD(ind_locks);
|
||||
s64 ret = 0;
|
||||
|
||||
@@ -290,12 +305,17 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
|
||||
if (WARN_ON_ONCE(last < iblock))
|
||||
return -EINVAL;
|
||||
|
||||
if (inode) {
|
||||
si = SCOUTFS_I(inode);
|
||||
down_write(&si->extent_sem);
|
||||
}
|
||||
|
||||
while (iblock <= last) {
|
||||
if (inode)
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
|
||||
true, cnt);
|
||||
true);
|
||||
else
|
||||
ret = scoutfs_hold_trans(sb, cnt);
|
||||
ret = scoutfs_hold_trans(sb);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
@@ -321,6 +341,9 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (si)
|
||||
up_write(&si->extent_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -407,8 +430,7 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
|
||||
count = 1;
|
||||
|
||||
ret = scoutfs_alloc_data(sb, datinf->alloc, datinf->wri,
|
||||
&datinf->data_avail, &datinf->cached_ext,
|
||||
count, &blkno, &count);
|
||||
&datinf->dalloc, count, &blkno, &count);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
@@ -533,6 +555,38 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Typically extent item users are serialized by i_mutex. But page
|
||||
* readers only hold the page lock and need to be protected from writers
|
||||
* in other pages which can be manipulating neighbouring extents as
|
||||
* they split and merge.
|
||||
*/
|
||||
static int scoutfs_get_block_read(struct inode *inode, sector_t iblock,
|
||||
struct buffer_head *bh, int create)
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
int ret;
|
||||
|
||||
down_read(&si->extent_sem);
|
||||
ret = scoutfs_get_block(inode, iblock, bh, create);
|
||||
up_read(&si->extent_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scoutfs_get_block_write(struct inode *inode, sector_t iblock,
|
||||
struct buffer_head *bh, int create)
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
int ret;
|
||||
|
||||
down_write(&si->extent_sem);
|
||||
ret = scoutfs_get_block(inode, iblock, bh, create);
|
||||
up_write(&si->extent_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is almost never used. We can't block on a cluster lock while
|
||||
* holding the page lock because lock invalidation gets the page lock
|
||||
@@ -598,7 +652,7 @@ static int scoutfs_readpage(struct file *file, struct page *page)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = mpage_readpage(page, scoutfs_get_block);
|
||||
ret = mpage_readpage(page, scoutfs_get_block_read);
|
||||
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
@@ -646,7 +700,7 @@ static int scoutfs_readpages(struct file *file, struct address_space *mapping,
|
||||
}
|
||||
}
|
||||
|
||||
ret = mpage_readpages(mapping, pages, nr_pages, scoutfs_get_block);
|
||||
ret = mpage_readpages(mapping, pages, nr_pages, scoutfs_get_block_read);
|
||||
out:
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
BUG_ON(!list_empty(pages));
|
||||
@@ -655,13 +709,13 @@ out:
|
||||
|
||||
static int scoutfs_writepage(struct page *page, struct writeback_control *wbc)
|
||||
{
|
||||
return block_write_full_page(page, scoutfs_get_block, wbc);
|
||||
return block_write_full_page(page, scoutfs_get_block_write, wbc);
|
||||
}
|
||||
|
||||
static int scoutfs_writepages(struct address_space *mapping,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
return mpage_writepages(mapping, wbc, scoutfs_get_block);
|
||||
return mpage_writepages(mapping, wbc, scoutfs_get_block_write);
|
||||
}
|
||||
|
||||
/* fsdata allocated in write_begin and freed in write_end */
|
||||
@@ -697,13 +751,13 @@ static int scoutfs_write_begin(struct file *file,
|
||||
goto out;
|
||||
}
|
||||
|
||||
retry:
|
||||
do {
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, &wbd->ind_locks, inode,
|
||||
true) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks,
|
||||
ind_seq,
|
||||
SIC_WRITE_BEGIN());
|
||||
ind_seq);
|
||||
} while (ret > 0);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -712,17 +766,22 @@ static int scoutfs_write_begin(struct file *file,
|
||||
flags |= AOP_FLAG_NOFS;
|
||||
|
||||
/* generic write_end updates i_size and calls dirty_inode */
|
||||
ret = scoutfs_dirty_inode_item(inode, wbd->lock);
|
||||
if (ret == 0)
|
||||
ret = block_write_begin(mapping, pos, len, flags, pagep,
|
||||
scoutfs_get_block);
|
||||
if (ret)
|
||||
ret = scoutfs_dirty_inode_item(inode, wbd->lock) ?:
|
||||
block_write_begin(mapping, pos, len, flags, pagep,
|
||||
scoutfs_get_block_write);
|
||||
if (ret < 0) {
|
||||
scoutfs_release_trans(sb);
|
||||
out:
|
||||
if (ret) {
|
||||
scoutfs_inode_index_unlock(sb, &wbd->ind_locks);
|
||||
kfree(wbd);
|
||||
if (ret == -ENOBUFS) {
|
||||
/* Retry with a new transaction. */
|
||||
scoutfs_inc_counter(sb, data_write_begin_enobufs_retry);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
if (ret < 0)
|
||||
kfree(wbd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -859,9 +918,8 @@ static s64 fallocate_extents(struct super_block *sb, struct inode *inode,
|
||||
mutex_lock(&datinf->mutex);
|
||||
|
||||
ret = scoutfs_alloc_data(sb, datinf->alloc, datinf->wri,
|
||||
&datinf->data_avail,
|
||||
&datinf->cached_ext,
|
||||
count, &blkno, &count);
|
||||
&datinf->dalloc, count,
|
||||
&blkno, &count);
|
||||
if (ret == 0) {
|
||||
ret = scoutfs_ext_set(sb, &data_ext_ops, &args, iblock,
|
||||
count, blkno,
|
||||
@@ -869,7 +927,7 @@ static s64 fallocate_extents(struct super_block *sb, struct inode *inode,
|
||||
if (ret < 0) {
|
||||
err = scoutfs_free_data(sb, datinf->alloc,
|
||||
datinf->wri,
|
||||
&datinf->data_avail,
|
||||
&datinf->data_freed,
|
||||
blkno, count);
|
||||
BUG_ON(err); /* inconsistent */
|
||||
}
|
||||
@@ -903,6 +961,7 @@ static s64 fallocate_extents(struct super_block *sb, struct inode *inode,
|
||||
long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
||||
{
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
const u64 ino = scoutfs_ino(inode);
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
@@ -913,6 +972,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
||||
s64 ret;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
down_write(&si->extent_sem);
|
||||
|
||||
/* XXX support more flags */
|
||||
if (mode & ~(FALLOC_FL_KEEP_SIZE)) {
|
||||
@@ -950,8 +1010,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
||||
|
||||
while(iblock <= last) {
|
||||
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
|
||||
SIC_FALLOCATE_ONE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -969,6 +1028,12 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
||||
scoutfs_release_trans(sb);
|
||||
scoutfs_inode_index_unlock(sb, &ind_locks);
|
||||
|
||||
/* txn couldn't meet the request. Let's try with a new txn */
|
||||
if (ret == -ENOBUFS) {
|
||||
scoutfs_inc_counter(sb, data_fallocate_enobufs_retry);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
@@ -978,6 +1043,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
|
||||
|
||||
out:
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
|
||||
up_write(&si->extent_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
trace_scoutfs_data_fallocate(sb, ino, mode, offset, len, ret);
|
||||
@@ -998,6 +1064,7 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
|
||||
struct scoutfs_lock *lock)
|
||||
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct data_ext_args args = {
|
||||
.ino = scoutfs_ino(inode),
|
||||
@@ -1019,8 +1086,7 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
|
||||
}
|
||||
|
||||
/* we're updating meta_seq with offline block count */
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
|
||||
SIC_SETATTR_MORE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
@@ -1028,8 +1094,10 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
|
||||
down_write(&si->extent_sem);
|
||||
ret = scoutfs_ext_insert(sb, &data_ext_ops, &args,
|
||||
0, count, 0, SEF_OFFLINE);
|
||||
up_write(&si->extent_sem);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
|
||||
@@ -1043,6 +1111,277 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* We're using truncate_inode_pages_range to maintain consistency
|
||||
* between the page cache and extents that just changed. We have to
|
||||
* call with full aligned page offsets or it thinks that it should leave
|
||||
* behind a zeroed partial page.
|
||||
*/
|
||||
static void truncate_inode_pages_extent(struct inode *inode, u64 start, u64 len)
|
||||
{
|
||||
truncate_inode_pages_range(&inode->i_data,
|
||||
start << SCOUTFS_BLOCK_SM_SHIFT,
|
||||
((start + len) << SCOUTFS_BLOCK_SM_SHIFT) - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Move extents from one file to another. The behaviour is more fully
|
||||
* explained above the move_blocks ioctl argument structure definition.
|
||||
*
|
||||
* The caller has processed the ioctl args and performed the most basic
|
||||
* inode checks, but we perform more detailed inode checks once we have
|
||||
* the inode lock and refreshed inodes. Our job is to safely lock the
|
||||
* two files and move the extents.
|
||||
*/
|
||||
#define MOVE_DATA_EXTENTS_PER_HOLD 16
|
||||
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
|
||||
u64 byte_len, struct inode *to, u64 to_off, bool is_stage,
|
||||
u64 data_version)
|
||||
{
|
||||
struct scoutfs_inode_info *from_si = SCOUTFS_I(from);
|
||||
struct scoutfs_inode_info *to_si = SCOUTFS_I(to);
|
||||
struct super_block *sb = from->i_sb;
|
||||
struct scoutfs_lock *from_lock = NULL;
|
||||
struct scoutfs_lock *to_lock = NULL;
|
||||
struct data_ext_args from_args;
|
||||
struct data_ext_args to_args;
|
||||
struct scoutfs_extent ext;
|
||||
struct timespec cur_time;
|
||||
LIST_HEAD(locks);
|
||||
bool done = false;
|
||||
loff_t from_size;
|
||||
loff_t to_size;
|
||||
u64 from_offline;
|
||||
u64 to_offline;
|
||||
u64 from_start;
|
||||
u64 to_start;
|
||||
u64 from_iblock;
|
||||
u64 to_iblock;
|
||||
u64 count;
|
||||
u64 junk;
|
||||
u64 seq;
|
||||
u64 map;
|
||||
u64 len;
|
||||
int ret;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
lock_two_nondirectories(from, to);
|
||||
|
||||
ret = scoutfs_lock_inodes(sb, SCOUTFS_LOCK_WRITE,
|
||||
SCOUTFS_LKF_REFRESH_INODE, from, &from_lock,
|
||||
to, &to_lock, NULL, NULL, NULL, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if ((from_off & SCOUTFS_BLOCK_SM_MASK) ||
|
||||
(to_off & SCOUTFS_BLOCK_SM_MASK) ||
|
||||
((byte_len & SCOUTFS_BLOCK_SM_MASK) &&
|
||||
(from_off + byte_len != i_size_read(from)))) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (is_stage && (data_version != SCOUTFS_I(to)->data_version)) {
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
from_iblock = from_off >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
count = (byte_len + SCOUTFS_BLOCK_SM_MASK) >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
to_iblock = to_off >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
|
||||
if (S_ISDIR(from->i_mode) || S_ISDIR(to->i_mode)) {
|
||||
ret = -EISDIR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!S_ISREG(from->i_mode) || !S_ISREG(to->i_mode)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = inode_permission(from, MAY_WRITE) ?:
|
||||
inode_permission(to, MAY_WRITE);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* can't stage once data_version changes */
|
||||
scoutfs_inode_get_onoff(from, &junk, &from_offline);
|
||||
scoutfs_inode_get_onoff(to, &junk, &to_offline);
|
||||
if (from_offline || (to_offline && !is_stage)) {
|
||||
ret = -ENODATA;
|
||||
goto out;
|
||||
}
|
||||
|
||||
from_args = (struct data_ext_args) {
|
||||
.ino = scoutfs_ino(from),
|
||||
.inode = from,
|
||||
.lock = from_lock,
|
||||
};
|
||||
|
||||
to_args = (struct data_ext_args) {
|
||||
.ino = scoutfs_ino(to),
|
||||
.inode = to,
|
||||
.lock = to_lock,
|
||||
};
|
||||
|
||||
inode_dio_wait(from);
|
||||
inode_dio_wait(to);
|
||||
|
||||
ret = filemap_write_and_wait_range(&from->i_data, from_off,
|
||||
from_off + byte_len - 1);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
for (;;) {
|
||||
ret = scoutfs_inode_index_start(sb, &seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, &locks, from, true) ?:
|
||||
scoutfs_inode_index_prepare(sb, &locks, to, true) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &locks, seq);
|
||||
if (ret > 0)
|
||||
continue;
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_dirty_inode_item(from, from_lock) ?:
|
||||
scoutfs_dirty_inode_item(to, to_lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
down_write_two(&from_si->extent_sem, &to_si->extent_sem);
|
||||
|
||||
/* arbitrarily limit the number of extents per trans hold */
|
||||
for (i = 0; i < MOVE_DATA_EXTENTS_PER_HOLD; i++) {
|
||||
struct scoutfs_extent off_ext;
|
||||
|
||||
/* find the next extent to move */
|
||||
ret = scoutfs_ext_next(sb, &data_ext_ops, &from_args,
|
||||
from_iblock, 1, &ext);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT) {
|
||||
done = true;
|
||||
ret = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/* only move extents within count and i_size */
|
||||
if (ext.start >= from_iblock + count ||
|
||||
ext.start >= i_size_read(from)) {
|
||||
done = true;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
from_start = max(ext.start, from_iblock);
|
||||
map = ext.map + (from_start - ext.start);
|
||||
len = min3(from_iblock + count,
|
||||
round_up((u64)i_size_read(from),
|
||||
SCOUTFS_BLOCK_SM_SIZE),
|
||||
ext.start + ext.len) - from_start;
|
||||
|
||||
to_start = to_iblock + (from_start - from_iblock);
|
||||
|
||||
if (is_stage) {
|
||||
ret = scoutfs_ext_next(sb, &data_ext_ops, &to_args,
|
||||
to_iblock, 1, &off_ext);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (!scoutfs_ext_inside(to_start, len, &off_ext) ||
|
||||
!(off_ext.flags & SEF_OFFLINE)) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
|
||||
to_start, len,
|
||||
map, ext.flags);
|
||||
} else {
|
||||
/* insert the new, fails if it overlaps */
|
||||
ret = scoutfs_ext_insert(sb, &data_ext_ops, &to_args,
|
||||
to_start, len,
|
||||
map, ext.flags);
|
||||
}
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
/* remove the old, possibly splitting */
|
||||
ret = scoutfs_ext_set(sb, &data_ext_ops, &from_args,
|
||||
from_start, len, 0, 0);
|
||||
if (ret < 0) {
|
||||
if (is_stage) {
|
||||
/* re-mark dest range as offline */
|
||||
WARN_ON_ONCE(!(off_ext.flags & SEF_OFFLINE));
|
||||
err = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
|
||||
to_start, len,
|
||||
0, off_ext.flags);
|
||||
} else {
|
||||
/* remove inserted new on err */
|
||||
err = scoutfs_ext_remove(sb, &data_ext_ops,
|
||||
&to_args, to_start,
|
||||
len);
|
||||
}
|
||||
BUG_ON(err); /* XXX inconsistent */
|
||||
break;
|
||||
}
|
||||
|
||||
trace_scoutfs_data_move_blocks(sb, scoutfs_ino(from),
|
||||
from_start, len, map,
|
||||
ext.flags,
|
||||
scoutfs_ino(to),
|
||||
to_start);
|
||||
|
||||
/* moved extent might extend i_size */
|
||||
to_size = (to_start + len) << SCOUTFS_BLOCK_SM_SHIFT;
|
||||
if (to_size > i_size_read(to)) {
|
||||
/* while maintaining final partial */
|
||||
from_size = (from_start + len) <<
|
||||
SCOUTFS_BLOCK_SM_SHIFT;
|
||||
if (from_size > i_size_read(from))
|
||||
to_size -= from_size -
|
||||
i_size_read(from);
|
||||
i_size_write(to, to_size);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
up_write(&from_si->extent_sem);
|
||||
up_write(&to_si->extent_sem);
|
||||
|
||||
cur_time = CURRENT_TIME;
|
||||
if (!is_stage) {
|
||||
to->i_ctime = to->i_mtime = cur_time;
|
||||
scoutfs_inode_inc_data_version(to);
|
||||
scoutfs_inode_set_data_seq(to);
|
||||
}
|
||||
from->i_ctime = from->i_mtime = cur_time;
|
||||
scoutfs_inode_inc_data_version(from);
|
||||
scoutfs_inode_set_data_seq(from);
|
||||
|
||||
scoutfs_update_inode_item(from, from_lock, &locks);
|
||||
scoutfs_update_inode_item(to, to_lock, &locks);
|
||||
scoutfs_release_trans(sb);
|
||||
scoutfs_inode_index_unlock(sb, &locks);
|
||||
|
||||
if (ret < 0 || done)
|
||||
break;
|
||||
}
|
||||
|
||||
/* remove any cached pages from old extents */
|
||||
truncate_inode_pages_extent(from, from_iblock, count);
|
||||
truncate_inode_pages_extent(to, to_iblock, count);
|
||||
|
||||
out:
|
||||
scoutfs_unlock(sb, from_lock, SCOUTFS_LOCK_WRITE);
|
||||
scoutfs_unlock(sb, to_lock, SCOUTFS_LOCK_WRITE);
|
||||
|
||||
unlock_two_nondirectories(from, to);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This copies to userspace :/
|
||||
*/
|
||||
@@ -1075,6 +1414,7 @@ static int fill_extent(struct fiemap_extent_info *fieinfo,
|
||||
int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
u64 start, u64 len)
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
const u64 ino = scoutfs_ino(inode);
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
@@ -1095,8 +1435,8 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* XXX overkill? */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
down_read(&si->extent_sem);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
|
||||
if (ret)
|
||||
@@ -1148,6 +1488,7 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
ret = fill_extent(fieinfo, &cur, last_flags);
|
||||
unlock:
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
|
||||
up_read(&si->extent_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
out:
|
||||
@@ -1227,8 +1568,9 @@ static struct scoutfs_data_wait *dw_next(struct scoutfs_data_wait *dw)
|
||||
* Check if we should wait by looking for extents whose flags match.
|
||||
* Returns 0 if no extents were found or any error encountered.
|
||||
*
|
||||
* The caller must have locked the extents before calling, both across
|
||||
* mounts and within this mount.
|
||||
* The caller must have acquired a cluster lock that covers the extent
|
||||
* items. We acquire the extent_sem to protect our read from writers in
|
||||
* other tasks.
|
||||
*
|
||||
* Returns 1 if any file extents in the caller's region matched. If the
|
||||
* wait struct is provided then it is initialized to be woken when the
|
||||
@@ -1240,6 +1582,7 @@ int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
|
||||
u8 sef, u8 op, struct scoutfs_data_wait *dw,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
const u64 ino = scoutfs_ino(inode);
|
||||
struct data_ext_args args = {
|
||||
@@ -1272,6 +1615,8 @@ int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
|
||||
}
|
||||
}
|
||||
|
||||
down_read(&si->extent_sem);
|
||||
|
||||
iblock = pos >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
last_block = (pos + len - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
|
||||
@@ -1308,6 +1653,8 @@ int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
|
||||
iblock = ext.start + ext.len;
|
||||
}
|
||||
|
||||
up_read(&si->extent_sem);
|
||||
|
||||
out:
|
||||
trace_scoutfs_data_wait_check(sb, ino, pos, len, sef, op, &ext, ret);
|
||||
|
||||
@@ -1461,7 +1808,7 @@ void scoutfs_data_init_btrees(struct super_block *sb,
|
||||
|
||||
datinf->alloc = alloc;
|
||||
datinf->wri = wri;
|
||||
datinf->data_avail = lt->data_avail;
|
||||
scoutfs_dalloc_init(&datinf->dalloc, <->data_avail);
|
||||
datinf->data_freed = lt->data_freed;
|
||||
|
||||
mutex_unlock(&datinf->mutex);
|
||||
@@ -1474,7 +1821,7 @@ void scoutfs_data_get_btrees(struct super_block *sb,
|
||||
|
||||
mutex_lock(&datinf->mutex);
|
||||
|
||||
lt->data_avail = datinf->data_avail;
|
||||
scoutfs_dalloc_get_root(&datinf->dalloc, <->data_avail);
|
||||
lt->data_freed = datinf->data_freed;
|
||||
|
||||
mutex_unlock(&datinf->mutex);
|
||||
@@ -1490,31 +1837,20 @@ int scoutfs_data_prepare_commit(struct super_block *sb)
|
||||
int ret;
|
||||
|
||||
mutex_lock(&datinf->mutex);
|
||||
if (datinf->cached_ext.len) {
|
||||
ret = scoutfs_free_data(sb, datinf->alloc, datinf->wri,
|
||||
&datinf->data_avail,
|
||||
datinf->cached_ext.start,
|
||||
datinf->cached_ext.len);
|
||||
if (ret == 0)
|
||||
memset(&datinf->cached_ext, 0,
|
||||
sizeof(datinf->cached_ext));
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
ret = scoutfs_dalloc_return_cached(sb, datinf->alloc, datinf->wri,
|
||||
&datinf->dalloc);
|
||||
mutex_unlock(&datinf->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This isn't serializing with allocators so it can be a bit racey.
|
||||
*/
|
||||
u64 scoutfs_data_alloc_free_bytes(struct super_block *sb)
|
||||
{
|
||||
DECLARE_DATA_INFO(sb, datinf);
|
||||
|
||||
return le64_to_cpu(datinf->data_avail.total_len) <<
|
||||
SCOUTFS_BLOCK_SM_SHIFT;
|
||||
return scoutfs_dalloc_total_len(&datinf->dalloc) <<
|
||||
SCOUTFS_BLOCK_SM_SHIFT;
|
||||
|
||||
}
|
||||
|
||||
int scoutfs_data_setup(struct super_block *sb)
|
||||
|
||||
@@ -58,6 +58,9 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
|
||||
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
|
||||
u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
|
||||
u64 data_version);
|
||||
|
||||
int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
|
||||
u8 sef, u8 op, struct scoutfs_data_wait *ow,
|
||||
|
||||
@@ -463,7 +463,18 @@ out:
|
||||
else
|
||||
inode = scoutfs_iget(sb, ino);
|
||||
|
||||
return d_splice_alias(inode, dentry);
|
||||
/*
|
||||
* We can't splice dir aliases into the dcache. dir entries
|
||||
* might have changed on other nodes so our dcache could still
|
||||
* contain them, rather than having been moved in rename. For
|
||||
* dirs, we use d_materialize_unique to remove any existing
|
||||
* aliases which must be stale. Our inode numbers aren't reused
|
||||
* so inodes pointed to by entries can't change types.
|
||||
*/
|
||||
if (!IS_ERR_OR_NULL(inode) && S_ISDIR(inode->i_mode))
|
||||
return d_materialise_unique(dentry, inode);
|
||||
else
|
||||
return d_splice_alias(inode, dentry);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -655,7 +666,6 @@ static int del_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
|
||||
*/
|
||||
static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
|
||||
umode_t mode, dev_t rdev,
|
||||
const struct scoutfs_item_count cnt,
|
||||
struct scoutfs_lock **dir_lock,
|
||||
struct scoutfs_lock **inode_lock,
|
||||
struct list_head *ind_locks)
|
||||
@@ -694,7 +704,7 @@ retry:
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, ind_locks, dir, true) ?:
|
||||
scoutfs_inode_index_prepare_ino(sb, ind_locks, ino, mode) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq, cnt);
|
||||
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -741,7 +751,6 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
|
||||
|
||||
hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);
|
||||
inode = lock_hold_create(dir, dentry, mode, rdev,
|
||||
SIC_MKNOD(dentry->d_name.len),
|
||||
&dir_lock, &inode_lock, &ind_locks);
|
||||
if (IS_ERR(inode))
|
||||
return PTR_ERR(inode);
|
||||
@@ -804,6 +813,7 @@ static int scoutfs_link(struct dentry *old_dentry,
|
||||
struct scoutfs_lock *dir_lock;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
LIST_HEAD(ind_locks);
|
||||
bool del_orphan;
|
||||
u64 dir_size;
|
||||
u64 ind_seq;
|
||||
u64 hash;
|
||||
@@ -832,12 +842,13 @@ static int scoutfs_link(struct dentry *old_dentry,
|
||||
goto out_unlock;
|
||||
|
||||
dir_size = i_size_read(dir) + dentry->d_name.len;
|
||||
del_orphan = (inode->i_nlink == 0);
|
||||
|
||||
retry:
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
|
||||
SIC_LINK(dentry->d_name.len));
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -847,6 +858,12 @@ retry:
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (del_orphan) {
|
||||
ret = scoutfs_orphan_dirty(sb, scoutfs_ino(inode));
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
pos = SCOUTFS_I(dir)->next_readdir_pos++;
|
||||
|
||||
ret = add_entry_items(sb, scoutfs_ino(dir), hash, pos,
|
||||
@@ -862,6 +879,11 @@ retry:
|
||||
inode->i_ctime = dir->i_mtime;
|
||||
inc_nlink(inode);
|
||||
|
||||
if (del_orphan) {
|
||||
ret = scoutfs_orphan_delete(sb, scoutfs_ino(inode));
|
||||
WARN_ON_ONCE(ret);
|
||||
}
|
||||
|
||||
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
|
||||
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
|
||||
|
||||
@@ -918,8 +940,7 @@ retry:
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
|
||||
SIC_UNLINK(dentry->d_name.len));
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -1154,7 +1175,6 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
|
||||
return ret;
|
||||
|
||||
inode = lock_hold_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0,
|
||||
SIC_SYMLINK(dentry->d_name.len, name_len),
|
||||
&dir_lock, &inode_lock, &ind_locks);
|
||||
if (IS_ERR(inode))
|
||||
return PTR_ERR(inode);
|
||||
@@ -1586,9 +1606,7 @@ retry:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, new_dir, false)) ?:
|
||||
(new_inode == NULL ? 0 :
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, new_inode, false)) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
|
||||
SIC_RENAME(old_dentry->d_name.len,
|
||||
new_dentry->d_name.len));
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -1756,6 +1774,42 @@ static int scoutfs_dir_open(struct inode *inode, struct file *file)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
|
||||
{
|
||||
struct super_block *sb = dir->i_sb;
|
||||
struct inode *inode = NULL;
|
||||
struct scoutfs_lock *dir_lock = NULL;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
LIST_HEAD(ind_locks);
|
||||
int ret;
|
||||
|
||||
if (dentry->d_name.len > SCOUTFS_NAME_LEN)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
inode = lock_hold_create(dir, dentry, mode, 0,
|
||||
&dir_lock, &inode_lock, &ind_locks);
|
||||
if (IS_ERR(inode))
|
||||
return PTR_ERR(inode);
|
||||
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
||||
insert_inode_hash(inode);
|
||||
d_tmpfile(dentry, inode);
|
||||
|
||||
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
|
||||
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
|
||||
scoutfs_inode_index_unlock(sb, &ind_locks);
|
||||
|
||||
ret = scoutfs_orphan_inode(inode);
|
||||
WARN_ON_ONCE(ret); /* XXX returning error but items deleted */
|
||||
|
||||
scoutfs_release_trans(sb);
|
||||
scoutfs_inode_index_unlock(sb, &ind_locks);
|
||||
scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const struct file_operations scoutfs_dir_fops = {
|
||||
.KC_FOP_READDIR = scoutfs_readdir,
|
||||
#ifdef KC_FMODE_KABI_ITERATE
|
||||
@@ -1766,7 +1820,10 @@ const struct file_operations scoutfs_dir_fops = {
|
||||
.llseek = generic_file_llseek,
|
||||
};
|
||||
|
||||
const struct inode_operations scoutfs_dir_iops = {
|
||||
|
||||
|
||||
const struct inode_operations_wrapper scoutfs_dir_iops = {
|
||||
.ops = {
|
||||
.lookup = scoutfs_lookup,
|
||||
.mknod = scoutfs_mknod,
|
||||
.create = scoutfs_create,
|
||||
@@ -1783,6 +1840,8 @@ const struct inode_operations scoutfs_dir_iops = {
|
||||
.removexattr = scoutfs_removexattr,
|
||||
.symlink = scoutfs_symlink,
|
||||
.permission = scoutfs_permission,
|
||||
},
|
||||
.tmpfile = scoutfs_tmpfile,
|
||||
};
|
||||
|
||||
void scoutfs_dir_exit(void)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#include "lock.h"
|
||||
|
||||
extern const struct file_operations scoutfs_dir_fops;
|
||||
extern const struct inode_operations scoutfs_dir_iops;
|
||||
extern const struct inode_operations_wrapper scoutfs_dir_iops;
|
||||
extern const struct inode_operations scoutfs_symlink_iops;
|
||||
|
||||
struct scoutfs_link_backref_entry {
|
||||
@@ -14,7 +14,7 @@ struct scoutfs_link_backref_entry {
|
||||
u64 dir_pos;
|
||||
u16 name_len;
|
||||
struct scoutfs_dirent dent;
|
||||
/* the full name is allocated and stored in dent.name[0] */
|
||||
/* the full name is allocated and stored in dent.name[] */
|
||||
};
|
||||
|
||||
int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
|
||||
|
||||
@@ -38,7 +38,7 @@ static bool ext_overlap(struct scoutfs_extent *ext, u64 start, u64 len)
|
||||
return !(e_end < start || ext->start > end);
|
||||
}
|
||||
|
||||
static bool ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
|
||||
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
|
||||
{
|
||||
u64 in_end = start + len - 1;
|
||||
u64 out_end = out->start + out->len - 1;
|
||||
@@ -241,7 +241,7 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
goto out;
|
||||
|
||||
/* removed extent must be entirely within found */
|
||||
if (!ext_inside(start, len, &found)) {
|
||||
if (!scoutfs_ext_inside(start, len, &found)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -341,7 +341,7 @@ int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
|
||||
if (ret == 0 && ext_overlap(&found, start, len)) {
|
||||
/* set extent must be entirely within found */
|
||||
if (!ext_inside(start, len, &found)) {
|
||||
if (!scoutfs_ext_inside(start, len, &found)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -31,5 +31,6 @@ int scoutfs_ext_alloc(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
struct scoutfs_extent *ext);
|
||||
int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
void *arg, u64 start, u64 len, u64 map, u8 flags);
|
||||
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -66,8 +66,8 @@ struct forest_info {
|
||||
struct forest_info *name = SCOUTFS_SB(sb)->forest_info
|
||||
|
||||
struct forest_refs {
|
||||
struct scoutfs_btree_ref fs_ref;
|
||||
struct scoutfs_btree_ref logs_ref;
|
||||
struct scoutfs_block_ref fs_ref;
|
||||
struct scoutfs_block_ref logs_ref;
|
||||
};
|
||||
|
||||
/* initialize some refs that initially aren't equal */
|
||||
@@ -96,20 +96,16 @@ static void calc_bloom_nrs(struct forest_bloom_nrs *bloom,
|
||||
}
|
||||
}
|
||||
|
||||
static struct scoutfs_block *read_bloom_ref(struct super_block *sb,
|
||||
struct scoutfs_btree_ref *ref)
|
||||
static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scoutfs_block_ref *ref)
|
||||
{
|
||||
struct scoutfs_block *bl;
|
||||
int ret;
|
||||
|
||||
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
|
||||
if (IS_ERR(bl))
|
||||
return bl;
|
||||
|
||||
if (!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno,
|
||||
SCOUTFS_BLOCK_MAGIC_BLOOM)) {
|
||||
scoutfs_block_invalidate(sb, bl);
|
||||
scoutfs_block_put(sb, bl);
|
||||
return ERR_PTR(-ESTALE);
|
||||
ret = scoutfs_block_read_ref(sb, ref, SCOUTFS_BLOCK_MAGIC_BLOOM, &bl);
|
||||
if (ret < 0) {
|
||||
if (ret == -ESTALE)
|
||||
scoutfs_inc_counter(sb, forest_bloom_stale);
|
||||
bl = ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return bl;
|
||||
@@ -280,7 +276,6 @@ int scoutfs_forest_read_items(struct super_block *sb,
|
||||
scoutfs_inc_counter(sb, forest_read_items);
|
||||
calc_bloom_nrs(&bloom, &lock->start);
|
||||
|
||||
roots = lock->roots;
|
||||
retry:
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
@@ -353,15 +348,9 @@ retry:
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
|
||||
return -EIO;
|
||||
prev_refs = refs;
|
||||
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@@ -381,18 +370,14 @@ out:
|
||||
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
struct scoutfs_block *new_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct scoutfs_bloom_block *bb;
|
||||
struct scoutfs_btree_ref *ref;
|
||||
struct scoutfs_block_ref *ref;
|
||||
struct forest_bloom_nrs bloom;
|
||||
int nr_set = 0;
|
||||
u64 blkno;
|
||||
u64 nr;
|
||||
int ret;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
nr = le64_to_cpu(finf->our_log.nr);
|
||||
@@ -410,53 +395,11 @@ int scoutfs_forest_set_bloom_bits(struct super_block *sb,
|
||||
|
||||
ref = &finf->our_log.bloom_ref;
|
||||
|
||||
if (ref->blkno) {
|
||||
bl = read_bloom_ref(sb, ref);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
goto unlock;
|
||||
}
|
||||
bb = bl->data;
|
||||
}
|
||||
|
||||
if (!ref->blkno || !scoutfs_block_writer_is_dirty(sb, bl)) {
|
||||
|
||||
ret = scoutfs_alloc_meta(sb, finf->alloc, finf->wri, &blkno);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
|
||||
new_bl = scoutfs_block_create(sb, blkno);
|
||||
if (IS_ERR(new_bl)) {
|
||||
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
|
||||
blkno);
|
||||
BUG_ON(err); /* could have dirtied */
|
||||
ret = PTR_ERR(new_bl);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (bl) {
|
||||
err = scoutfs_free_meta(sb, finf->alloc, finf->wri,
|
||||
le64_to_cpu(ref->blkno));
|
||||
BUG_ON(err); /* could have dirtied */
|
||||
memcpy(new_bl->data, bl->data, SCOUTFS_BLOCK_LG_SIZE);
|
||||
} else {
|
||||
memset(new_bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
}
|
||||
|
||||
scoutfs_block_writer_mark_dirty(sb, finf->wri, new_bl);
|
||||
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = new_bl;
|
||||
bb = bl->data;
|
||||
new_bl = NULL;
|
||||
|
||||
bb->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BLOOM);
|
||||
bb->hdr.fsid = super->hdr.fsid;
|
||||
bb->hdr.blkno = cpu_to_le64(blkno);
|
||||
prandom_bytes(&bb->hdr.seq, sizeof(bb->hdr.seq));
|
||||
ref->blkno = bb->hdr.blkno;
|
||||
ref->seq = bb->hdr.seq;
|
||||
}
|
||||
ret = scoutfs_block_dirty_ref(sb, finf->alloc, finf->wri, ref, SCOUTFS_BLOCK_MAGIC_BLOOM,
|
||||
&bl, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
bb = bl->data;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(bloom.nrs); i++) {
|
||||
if (!test_and_set_bit_le(bloom.nrs[i], bb->bits)) {
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
#ifndef _SCOUTFS_FORMAT_H_
|
||||
#define _SCOUTFS_FORMAT_H_
|
||||
|
||||
#define SCOUTFS_INTEROP_VERSION 0ULL
|
||||
#define SCOUTFS_INTEROP_VERSION_STR __stringify(0)
|
||||
|
||||
/* statfs(2) f_type */
|
||||
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
|
||||
|
||||
@@ -11,6 +14,7 @@
|
||||
#define SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK 0x897e4a7d
|
||||
#define SCOUTFS_BLOCK_MAGIC_SRCH_PARENT 0xb23a2a05
|
||||
#define SCOUTFS_BLOCK_MAGIC_ALLOC_LIST 0x8a93ac83
|
||||
#define SCOUTFS_BLOCK_MAGIC_QUORUM 0xbc310868
|
||||
|
||||
/*
|
||||
* The super block, quorum block, and file data allocation granularity
|
||||
@@ -51,15 +55,19 @@
|
||||
#define SCOUTFS_SUPER_BLKNO ((64ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
/*
|
||||
* A reasonably large region of aligned quorum blocks follow the super
|
||||
* block. Each voting cycle reads the entire region so we don't want it
|
||||
* to be too enormous. 256K seems like a reasonably chunky single IO.
|
||||
* The number of blocks in the region also determines the number of
|
||||
* mounts that have a reasonable probability of not overwriting each
|
||||
* other's random block locations.
|
||||
* A small number of quorum blocks follow the super block, enough of
|
||||
* them to match the starting offset of the super block so the region is
|
||||
* aligned to the power of two that contains it.
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_BLKNO ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
#define SCOUTFS_QUORUM_BLOCKS ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
#define SCOUTFS_QUORUM_BLKNO (SCOUTFS_SUPER_BLKNO + 1)
|
||||
#define SCOUTFS_QUORUM_BLOCKS (SCOUTFS_SUPER_BLKNO - 1)
|
||||
|
||||
/*
|
||||
* Free metadata blocks start after the quorum blocks
|
||||
*/
|
||||
#define SCOUTFS_META_DEV_START_BLKNO \
|
||||
((SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >> \
|
||||
SCOUTFS_BLOCK_SM_LG_SHIFT)
|
||||
|
||||
/*
|
||||
* Start data on the data device aligned as well.
|
||||
@@ -78,11 +86,33 @@ struct scoutfs_timespec {
|
||||
__u8 __pad[4];
|
||||
};
|
||||
|
||||
/* XXX ipv6 */
|
||||
struct scoutfs_inet_addr {
|
||||
__le32 addr;
|
||||
enum scoutfs_inet_family {
|
||||
SCOUTFS_AF_NONE = 0,
|
||||
SCOUTFS_AF_IPV4 = 1,
|
||||
SCOUTFS_AF_IPV6 = 2,
|
||||
};
|
||||
|
||||
struct scoutfs_inet_addr4 {
|
||||
__le16 family;
|
||||
__le16 port;
|
||||
__u8 __pad[2];
|
||||
__le32 addr;
|
||||
};
|
||||
|
||||
/*
|
||||
* Not yet supported by code.
|
||||
*/
|
||||
struct scoutfs_inet_addr6 {
|
||||
__le16 family;
|
||||
__le16 port;
|
||||
__u8 addr[16];
|
||||
__le32 flow_info;
|
||||
__le32 scope_id;
|
||||
__u8 __pad[4];
|
||||
};
|
||||
|
||||
union scoutfs_inet_addr {
|
||||
struct scoutfs_inet_addr4 v4;
|
||||
struct scoutfs_inet_addr6 v6;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -98,6 +128,15 @@ struct scoutfs_block_header {
|
||||
__le64 blkno;
|
||||
};
|
||||
|
||||
/*
|
||||
* A reference to a block. The corresponding fields in the block_header
|
||||
* must match after having read the block contents.
|
||||
*/
|
||||
struct scoutfs_block_ref {
|
||||
__le64 blkno;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* scoutfs identifies all file system metadata items by a small key
|
||||
* struct.
|
||||
@@ -173,19 +212,6 @@ struct scoutfs_key {
|
||||
#define skfl_neglen _sk_second
|
||||
#define skfl_blkno _sk_third
|
||||
|
||||
struct scoutfs_radix_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
union {
|
||||
struct scoutfs_radix_ref {
|
||||
__le64 blkno;
|
||||
__le64 seq;
|
||||
__le64 sm_total;
|
||||
__le64 lg_total;
|
||||
} refs[0];
|
||||
__le64 bits[0];
|
||||
};
|
||||
};
|
||||
|
||||
struct scoutfs_avl_root {
|
||||
__le16 node;
|
||||
};
|
||||
@@ -207,17 +233,12 @@ struct scoutfs_avl_node {
|
||||
*/
|
||||
#define SCOUTFS_BTREE_MAX_HEIGHT 20
|
||||
|
||||
struct scoutfs_btree_ref {
|
||||
__le64 blkno;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* A height of X means that the first block read will have level X-1 and
|
||||
* the leaves will have level 0.
|
||||
*/
|
||||
struct scoutfs_btree_root {
|
||||
struct scoutfs_btree_ref ref;
|
||||
struct scoutfs_block_ref ref;
|
||||
__u8 height;
|
||||
__u8 __pad[7];
|
||||
};
|
||||
@@ -238,7 +259,7 @@ struct scoutfs_btree_block {
|
||||
__le16 mid_free_len;
|
||||
__u8 level;
|
||||
__u8 __pad[7];
|
||||
struct scoutfs_btree_item items[0];
|
||||
struct scoutfs_btree_item items[];
|
||||
/* leaf blocks have a fixed size item offset hash table at the end */
|
||||
};
|
||||
|
||||
@@ -258,18 +279,13 @@ struct scoutfs_btree_block {
|
||||
#define SCOUTFS_BTREE_LEAF_ITEM_HASH_BYTES \
|
||||
(SCOUTFS_BTREE_LEAF_ITEM_HASH_NR * sizeof(__le16))
|
||||
|
||||
struct scoutfs_alloc_list_ref {
|
||||
__le64 blkno;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* first_nr tracks the nr of the first block in the list and is used for
|
||||
* allocation sizing. total_nr is the sum of the nr of all the blocks in
|
||||
* the list and is used for calculating total free block counts.
|
||||
*/
|
||||
struct scoutfs_alloc_list_head {
|
||||
struct scoutfs_alloc_list_ref ref;
|
||||
struct scoutfs_block_ref ref;
|
||||
__le64 total_nr;
|
||||
__le32 first_nr;
|
||||
__u8 __pad[4];
|
||||
@@ -288,10 +304,10 @@ struct scoutfs_alloc_list_head {
|
||||
*/
|
||||
struct scoutfs_alloc_list_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
struct scoutfs_alloc_list_ref next;
|
||||
struct scoutfs_block_ref next;
|
||||
__le32 start;
|
||||
__le32 nr;
|
||||
__le64 blknos[0]; /* naturally aligned for sorting */
|
||||
__le64 blknos[]; /* naturally aligned for sorting */
|
||||
};
|
||||
|
||||
#define SCOUTFS_ALLOC_LIST_MAX_BLOCKS \
|
||||
@@ -316,7 +332,7 @@ struct scoutfs_mounted_client_btree_val {
|
||||
__u8 flags;
|
||||
};
|
||||
|
||||
#define SCOUTFS_MOUNTED_CLIENT_VOTER (1 << 0)
|
||||
#define SCOUTFS_MOUNTED_CLIENT_QUORUM (1 << 0)
|
||||
|
||||
/*
|
||||
* srch files are a contiguous run of blocks with compressed entries
|
||||
@@ -334,15 +350,10 @@ struct scoutfs_srch_entry {
|
||||
|
||||
#define SCOUTFS_SRCH_ENTRY_MAX_BYTES (2 + (sizeof(__u64) * 3))
|
||||
|
||||
struct scoutfs_srch_ref {
|
||||
__le64 blkno;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
struct scoutfs_srch_file {
|
||||
struct scoutfs_srch_entry first;
|
||||
struct scoutfs_srch_entry last;
|
||||
struct scoutfs_srch_ref ref;
|
||||
struct scoutfs_block_ref ref;
|
||||
__le64 blocks;
|
||||
__le64 entries;
|
||||
__u8 height;
|
||||
@@ -351,13 +362,13 @@ struct scoutfs_srch_file {
|
||||
|
||||
struct scoutfs_srch_parent {
|
||||
struct scoutfs_block_header hdr;
|
||||
struct scoutfs_srch_ref refs[0];
|
||||
struct scoutfs_block_ref refs[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_SRCH_PARENT_REFS \
|
||||
((SCOUTFS_BLOCK_LG_SIZE - \
|
||||
offsetof(struct scoutfs_srch_parent, refs)) / \
|
||||
sizeof(struct scoutfs_srch_ref))
|
||||
sizeof(struct scoutfs_block_ref))
|
||||
|
||||
struct scoutfs_srch_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
@@ -366,7 +377,7 @@ struct scoutfs_srch_block {
|
||||
struct scoutfs_srch_entry tail;
|
||||
__le32 entry_nr;
|
||||
__le32 entry_bytes;
|
||||
__u8 entries[0];
|
||||
__u8 entries[];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -428,7 +439,7 @@ struct scoutfs_log_trees {
|
||||
struct scoutfs_alloc_list_head meta_avail;
|
||||
struct scoutfs_alloc_list_head meta_freed;
|
||||
struct scoutfs_btree_root item_root;
|
||||
struct scoutfs_btree_ref bloom_ref;
|
||||
struct scoutfs_block_ref bloom_ref;
|
||||
struct scoutfs_alloc_root data_avail;
|
||||
struct scoutfs_alloc_root data_freed;
|
||||
struct scoutfs_srch_file srch_file;
|
||||
@@ -441,7 +452,7 @@ struct scoutfs_log_item_value {
|
||||
__le64 vers;
|
||||
__u8 flags;
|
||||
__u8 __pad[7];
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -456,7 +467,7 @@ struct scoutfs_log_item_value {
|
||||
struct scoutfs_bloom_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 total_set;
|
||||
__le64 bits[0];
|
||||
__le64 bits[];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -538,7 +549,7 @@ struct scoutfs_xattr {
|
||||
__le16 val_len;
|
||||
__u8 name_len;
|
||||
__u8 __pad[5];
|
||||
__u8 name[0];
|
||||
__u8 name[];
|
||||
};
|
||||
|
||||
|
||||
@@ -547,56 +558,84 @@ struct scoutfs_xattr {
|
||||
|
||||
#define SCOUTFS_UUID_BYTES 16
|
||||
|
||||
/*
|
||||
* Mounts read all the quorum blocks and write to one random quorum
|
||||
* block during a cycle. The min cycle time limits the per-mount iop
|
||||
* load during elections. The random cycle delay makes it less likely
|
||||
* that mounts will read and write at the same time and miss each
|
||||
* other's writes. An election only completes if a quorum of mounts
|
||||
* vote for a leader before any of their elections timeout. This is
|
||||
* made less likely by the probability that mounts will overwrite each
|
||||
* others random block locations. The max quorum count limits that
|
||||
* probability. 9 mounts only have a 55% chance of writing to unique 4k
|
||||
* blocks in a 256k region. The election timeout is set to include
|
||||
* enough cycles to usually complete the election. Once a leader is
|
||||
* elected it spends a number of cycles writing out blocks with itself
|
||||
* logged as a leader. This reduces the possibility that servers
|
||||
* will have their log entries overwritten and not be fenced.
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_MAX_COUNT 9
|
||||
#define SCOUTFS_QUORUM_CYCLE_LO_MS 10
|
||||
#define SCOUTFS_QUORUM_CYCLE_HI_MS 20
|
||||
#define SCOUTFS_QUORUM_TERM_LO_MS 250
|
||||
#define SCOUTFS_QUORUM_TERM_HI_MS 500
|
||||
#define SCOUTFS_QUORUM_ELECTED_LOG_CYCLES 10
|
||||
#define SCOUTFS_QUORUM_MAX_SLOTS 15
|
||||
|
||||
struct scoutfs_quorum_block {
|
||||
/*
|
||||
* To elect a leader, members race to have their variable election
|
||||
* timeouts expire. If they're first to send a vote request with a
|
||||
* greater term to a majority of waiting members they'll be elected with
|
||||
* a majority. If the timeouts are too close, the vote may be split and
|
||||
* everyone will wait for another cycle of variable timeouts to expire.
|
||||
*
|
||||
* These determine how long it will take to elect a leader once there's
|
||||
* no evidence of a server (no leader quorum blocks on mount; heartbeat
|
||||
* timeout expired.)
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_ELECT_MIN_MS 250
|
||||
#define SCOUTFS_QUORUM_ELECT_VAR_MS 100
|
||||
|
||||
/*
|
||||
* Once a leader is elected they send out heartbeats at regular
|
||||
* intervals to force members to wait the much longer heartbeat timeout.
|
||||
* Once heartbeat timeout expires without receiving a heartbeat they'll
|
||||
* switch over the performing elections.
|
||||
*
|
||||
* These determine how long it could take members to notice that a
|
||||
* leader has gone silent and start to elect a new leader.
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
|
||||
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)
|
||||
|
||||
struct scoutfs_quorum_message {
|
||||
__le64 fsid;
|
||||
__le64 blkno;
|
||||
__le64 version;
|
||||
__le64 term;
|
||||
__le64 write_nr;
|
||||
__le64 voter_rid;
|
||||
__le64 vote_for_rid;
|
||||
__u8 type;
|
||||
__u8 from;
|
||||
__u8 __pad[2];
|
||||
__le32 crc;
|
||||
__u8 log_nr;
|
||||
__u8 __pad[3];
|
||||
struct scoutfs_quorum_log {
|
||||
__le64 term;
|
||||
__le64 rid;
|
||||
struct scoutfs_inet_addr addr;
|
||||
} log[0];
|
||||
};
|
||||
|
||||
#define SCOUTFS_QUORUM_LOG_MAX \
|
||||
((SCOUTFS_BLOCK_SM_SIZE - sizeof(struct scoutfs_quorum_block)) / \
|
||||
sizeof(struct scoutfs_quorum_log))
|
||||
/* a candidate requests a vote */
|
||||
#define SCOUTFS_QUORUM_MSG_REQUEST_VOTE 0
|
||||
/* followers send votes to candidates */
|
||||
#define SCOUTFS_QUORUM_MSG_VOTE 1
|
||||
/* elected leaders broadcast heartbeats to delay elections */
|
||||
#define SCOUTFS_QUORUM_MSG_HEARTBEAT 2
|
||||
/* leaders broadcast as they leave to break heartbeat timeout */
|
||||
#define SCOUTFS_QUORUM_MSG_RESIGNATION 3
|
||||
#define SCOUTFS_QUORUM_MSG_INVALID 4
|
||||
|
||||
/*
|
||||
* The version is currently always 0, but will be used by mounts to
|
||||
* discover that membership has changed.
|
||||
*/
|
||||
struct scoutfs_quorum_config {
|
||||
__le64 version;
|
||||
struct scoutfs_quorum_slot {
|
||||
union scoutfs_inet_addr addr;
|
||||
} slots[SCOUTFS_QUORUM_MAX_SLOTS];
|
||||
};
|
||||
|
||||
struct scoutfs_quorum_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 term;
|
||||
__le64 random_write_mark;
|
||||
__le64 flags;
|
||||
struct scoutfs_quorum_block_event {
|
||||
__le64 rid;
|
||||
struct scoutfs_timespec ts;
|
||||
} write, update_term, set_leader, clear_leader, fenced;
|
||||
};
|
||||
|
||||
#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)
|
||||
|
||||
#define SCOUTFS_FLAG_IS_META_BDEV 0x01
|
||||
|
||||
struct scoutfs_super_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 id;
|
||||
__le64 format_hash;
|
||||
__le64 version;
|
||||
__le64 flags;
|
||||
__u8 uuid[SCOUTFS_UUID_BYTES];
|
||||
__le64 next_ino;
|
||||
@@ -607,12 +646,7 @@ struct scoutfs_super_block {
|
||||
__le64 total_data_blocks;
|
||||
__le64 first_data_blkno;
|
||||
__le64 last_data_blkno;
|
||||
__le64 quorum_fenced_term;
|
||||
__le64 quorum_server_term;
|
||||
__le64 unmount_barrier;
|
||||
__u8 quorum_count;
|
||||
__u8 __pad[7];
|
||||
struct scoutfs_inet_addr server_addr;
|
||||
struct scoutfs_quorum_config qconf;
|
||||
struct scoutfs_alloc_root meta_alloc[2];
|
||||
struct scoutfs_alloc_root data_alloc;
|
||||
struct scoutfs_alloc_list_head server_meta_avail[2];
|
||||
@@ -695,7 +729,7 @@ struct scoutfs_dirent {
|
||||
__le64 pos;
|
||||
__u8 type;
|
||||
__u8 __pad[7];
|
||||
__u8 name[0];
|
||||
__u8 name[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NAME_LEN 255
|
||||
@@ -746,12 +780,6 @@ enum scoutfs_dentry_type {
|
||||
* the same serer after receiving a greeting response and to a new
|
||||
* server after failover.
|
||||
*
|
||||
* @unmount_barrier: Incremented every time the remaining majority of
|
||||
* quorum members all agree to leave. The server tells a quorum member
|
||||
* the value that it's connecting under so that if the client sees the
|
||||
* value increase in the super block then it knows that the server has
|
||||
* processed its farewell and can safely unmount.
|
||||
*
|
||||
* @rid: The client's random id that was generated once as the mount
|
||||
* started up. This identifies a specific remote mount across
|
||||
* connections and servers. It's set to the client's rid in both the
|
||||
@@ -759,15 +787,14 @@ enum scoutfs_dentry_type {
|
||||
*/
|
||||
struct scoutfs_net_greeting {
|
||||
__le64 fsid;
|
||||
__le64 format_hash;
|
||||
__le64 version;
|
||||
__le64 server_term;
|
||||
__le64 unmount_barrier;
|
||||
__le64 rid;
|
||||
__le64 flags;
|
||||
};
|
||||
|
||||
#define SCOUTFS_NET_GREETING_FLAG_FAREWELL (1 << 0)
|
||||
#define SCOUTFS_NET_GREETING_FLAG_VOTER (1 << 1)
|
||||
#define SCOUTFS_NET_GREETING_FLAG_QUORUM (1 << 1)
|
||||
#define SCOUTFS_NET_GREETING_FLAG_INVALID (~(__u64)0 << 2)
|
||||
|
||||
/*
|
||||
@@ -800,7 +827,7 @@ struct scoutfs_net_header {
|
||||
__u8 flags;
|
||||
__u8 error;
|
||||
__u8 __pad[3];
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NET_FLAG_RESPONSE (1 << 0)
|
||||
@@ -868,15 +895,10 @@ struct scoutfs_net_lock {
|
||||
__u8 __pad[6];
|
||||
};
|
||||
|
||||
struct scoutfs_net_lock_grant_response {
|
||||
struct scoutfs_net_lock nl;
|
||||
struct scoutfs_net_roots roots;
|
||||
};
|
||||
|
||||
struct scoutfs_net_lock_recover {
|
||||
__le16 nr;
|
||||
__u8 __pad[6];
|
||||
struct scoutfs_net_lock locks[0];
|
||||
struct scoutfs_net_lock locks[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NET_LOCK_MAX_RECOVER_NR \
|
||||
|
||||
166
kmod/src/inode.c
166
kmod/src/inode.c
@@ -71,29 +71,30 @@ static struct kmem_cache *scoutfs_inode_cachep;
|
||||
*/
|
||||
static void scoutfs_inode_ctor(void *obj)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = obj;
|
||||
struct scoutfs_inode_info *si = obj;
|
||||
|
||||
mutex_init(&ci->item_mutex);
|
||||
seqcount_init(&ci->seqcount);
|
||||
ci->staging = false;
|
||||
scoutfs_per_task_init(&ci->pt_data_lock);
|
||||
atomic64_set(&ci->data_waitq.changed, 0);
|
||||
init_waitqueue_head(&ci->data_waitq.waitq);
|
||||
init_rwsem(&ci->xattr_rwsem);
|
||||
RB_CLEAR_NODE(&ci->writeback_node);
|
||||
init_rwsem(&si->extent_sem);
|
||||
mutex_init(&si->item_mutex);
|
||||
seqcount_init(&si->seqcount);
|
||||
si->staging = false;
|
||||
scoutfs_per_task_init(&si->pt_data_lock);
|
||||
atomic64_set(&si->data_waitq.changed, 0);
|
||||
init_waitqueue_head(&si->data_waitq.waitq);
|
||||
init_rwsem(&si->xattr_rwsem);
|
||||
RB_CLEAR_NODE(&si->writeback_node);
|
||||
|
||||
inode_init_once(&ci->inode);
|
||||
inode_init_once(&si->inode);
|
||||
}
|
||||
|
||||
struct inode *scoutfs_alloc_inode(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_inode_info *ci;
|
||||
struct scoutfs_inode_info *si;
|
||||
|
||||
ci = kmem_cache_alloc(scoutfs_inode_cachep, GFP_NOFS);
|
||||
if (!ci)
|
||||
si = kmem_cache_alloc(scoutfs_inode_cachep, GFP_NOFS);
|
||||
if (!si)
|
||||
return NULL;
|
||||
|
||||
return &ci->inode;
|
||||
return &si->inode;
|
||||
}
|
||||
|
||||
static void scoutfs_i_callback(struct rcu_head *head)
|
||||
@@ -181,7 +182,8 @@ static void set_inode_ops(struct inode *inode)
|
||||
inode->i_fop = &scoutfs_file_fops;
|
||||
break;
|
||||
case S_IFDIR:
|
||||
inode->i_op = &scoutfs_dir_iops;
|
||||
inode->i_op = &scoutfs_dir_iops.ops;
|
||||
inode->i_flags |= S_IOPS_WRAPPER;
|
||||
inode->i_fop = &scoutfs_dir_fops;
|
||||
break;
|
||||
case S_IFLNK:
|
||||
@@ -221,7 +223,7 @@ static void set_item_info(struct scoutfs_inode_info *si,
|
||||
|
||||
static void load_inode(struct inode *inode, struct scoutfs_inode *cinode)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
|
||||
i_size_write(inode, le64_to_cpu(cinode->size));
|
||||
set_nlink(inode, le32_to_cpu(cinode->nlink));
|
||||
@@ -236,23 +238,23 @@ static void load_inode(struct inode *inode, struct scoutfs_inode *cinode)
|
||||
inode->i_ctime.tv_sec = le64_to_cpu(cinode->ctime.sec);
|
||||
inode->i_ctime.tv_nsec = le32_to_cpu(cinode->ctime.nsec);
|
||||
|
||||
ci->meta_seq = le64_to_cpu(cinode->meta_seq);
|
||||
ci->data_seq = le64_to_cpu(cinode->data_seq);
|
||||
ci->data_version = le64_to_cpu(cinode->data_version);
|
||||
ci->online_blocks = le64_to_cpu(cinode->online_blocks);
|
||||
ci->offline_blocks = le64_to_cpu(cinode->offline_blocks);
|
||||
ci->next_readdir_pos = le64_to_cpu(cinode->next_readdir_pos);
|
||||
ci->next_xattr_id = le64_to_cpu(cinode->next_xattr_id);
|
||||
ci->flags = le32_to_cpu(cinode->flags);
|
||||
si->meta_seq = le64_to_cpu(cinode->meta_seq);
|
||||
si->data_seq = le64_to_cpu(cinode->data_seq);
|
||||
si->data_version = le64_to_cpu(cinode->data_version);
|
||||
si->online_blocks = le64_to_cpu(cinode->online_blocks);
|
||||
si->offline_blocks = le64_to_cpu(cinode->offline_blocks);
|
||||
si->next_readdir_pos = le64_to_cpu(cinode->next_readdir_pos);
|
||||
si->next_xattr_id = le64_to_cpu(cinode->next_xattr_id);
|
||||
si->flags = le32_to_cpu(cinode->flags);
|
||||
|
||||
/*
|
||||
* i_blocks is initialized from online and offline and is then
|
||||
* maintained as blocks come and go.
|
||||
*/
|
||||
inode->i_blocks = (ci->online_blocks + ci->offline_blocks)
|
||||
inode->i_blocks = (si->online_blocks + si->offline_blocks)
|
||||
<< SCOUTFS_BLOCK_SM_SECTOR_SHIFT;
|
||||
|
||||
set_item_info(ci, cinode);
|
||||
set_item_info(si, cinode);
|
||||
}
|
||||
|
||||
static void init_inode_key(struct scoutfs_key *key, u64 ino)
|
||||
@@ -334,7 +336,7 @@ int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
||||
static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
|
||||
u64 new_size, bool truncate)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
LIST_HEAD(ind_locks);
|
||||
int ret;
|
||||
@@ -342,8 +344,7 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return 0;
|
||||
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true,
|
||||
SIC_DIRTY_INODE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -353,7 +354,7 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
|
||||
truncate_setsize(inode, new_size);
|
||||
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
|
||||
if (truncate)
|
||||
ci->flags |= SCOUTFS_INO_FLAG_TRUNCATE;
|
||||
si->flags |= SCOUTFS_INO_FLAG_TRUNCATE;
|
||||
scoutfs_inode_set_data_seq(inode);
|
||||
scoutfs_update_inode_item(inode, lock, &ind_locks);
|
||||
|
||||
@@ -365,17 +366,16 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
|
||||
|
||||
static int clear_truncate_flag(struct inode *inode, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
LIST_HEAD(ind_locks);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
|
||||
SIC_DIRTY_INODE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ci->flags &= ~SCOUTFS_INO_FLAG_TRUNCATE;
|
||||
si->flags &= ~SCOUTFS_INO_FLAG_TRUNCATE;
|
||||
scoutfs_update_inode_item(inode, lock, &ind_locks);
|
||||
|
||||
scoutfs_release_trans(sb);
|
||||
@@ -386,13 +386,13 @@ static int clear_truncate_flag(struct inode *inode, struct scoutfs_lock *lock)
|
||||
|
||||
int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
u64 start;
|
||||
int ret, err;
|
||||
|
||||
trace_scoutfs_complete_truncate(inode, ci->flags);
|
||||
trace_scoutfs_complete_truncate(inode, si->flags);
|
||||
|
||||
if (!(ci->flags & SCOUTFS_INO_FLAG_TRUNCATE))
|
||||
if (!(si->flags & SCOUTFS_INO_FLAG_TRUNCATE))
|
||||
return 0;
|
||||
|
||||
start = (i_size_read(inode) + SCOUTFS_BLOCK_SM_SIZE - 1) >>
|
||||
@@ -486,8 +486,7 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
|
||||
SIC_DIRTY_INODE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -643,19 +642,19 @@ void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off)
|
||||
|
||||
static int scoutfs_iget_test(struct inode *inode, void *arg)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
u64 *ino = arg;
|
||||
|
||||
return ci->ino == *ino;
|
||||
return si->ino == *ino;
|
||||
}
|
||||
|
||||
static int scoutfs_iget_set(struct inode *inode, void *arg)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
u64 *ino = arg;
|
||||
|
||||
inode->i_ino = *ino;
|
||||
ci->ino = *ino;
|
||||
si->ino = *ino;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -705,7 +704,7 @@ out:
|
||||
|
||||
static void store_inode(struct scoutfs_inode *cinode, struct inode *inode)
|
||||
{
|
||||
struct scoutfs_inode_info *ci = SCOUTFS_I(inode);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
u64 online_blocks;
|
||||
u64 offline_blocks;
|
||||
|
||||
@@ -732,9 +731,9 @@ static void store_inode(struct scoutfs_inode *cinode, struct inode *inode)
|
||||
cinode->data_version = cpu_to_le64(scoutfs_inode_data_version(inode));
|
||||
cinode->online_blocks = cpu_to_le64(online_blocks);
|
||||
cinode->offline_blocks = cpu_to_le64(offline_blocks);
|
||||
cinode->next_readdir_pos = cpu_to_le64(ci->next_readdir_pos);
|
||||
cinode->next_xattr_id = cpu_to_le64(ci->next_xattr_id);
|
||||
cinode->flags = cpu_to_le32(ci->flags);
|
||||
cinode->next_readdir_pos = cpu_to_le64(si->next_readdir_pos);
|
||||
cinode->next_xattr_id = cpu_to_le64(si->next_xattr_id);
|
||||
cinode->flags = cpu_to_le32(si->flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1188,8 +1187,7 @@ int scoutfs_inode_index_start(struct super_block *sb, u64 *seq)
|
||||
* Returns > 0 if the seq changed and the locks should be retried.
|
||||
*/
|
||||
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
|
||||
struct list_head *list, u64 seq,
|
||||
const struct scoutfs_item_count cnt)
|
||||
struct list_head *list, u64 seq)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct index_lock *ind_lock;
|
||||
@@ -1205,7 +1203,7 @@ int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_hold_trans(sb, cnt);
|
||||
ret = scoutfs_hold_trans(sb);
|
||||
if (ret == 0 && seq != sbi->trans_seq) {
|
||||
scoutfs_release_trans(sb);
|
||||
ret = 1;
|
||||
@@ -1219,8 +1217,7 @@ out:
|
||||
}
|
||||
|
||||
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
|
||||
bool set_data_seq,
|
||||
const struct scoutfs_item_count cnt)
|
||||
bool set_data_seq)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
int ret;
|
||||
@@ -1230,7 +1227,7 @@ int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
|
||||
ret = scoutfs_inode_index_start(sb, &seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, list, inode,
|
||||
set_data_seq) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, list, seq, cnt);
|
||||
scoutfs_inode_index_try_lock_hold(sb, list, seq);
|
||||
} while (ret > 0);
|
||||
|
||||
return ret;
|
||||
@@ -1368,7 +1365,7 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
|
||||
umode_t mode, dev_t rdev, u64 ino,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
struct scoutfs_inode_info *ci;
|
||||
struct scoutfs_inode_info *si;
|
||||
struct scoutfs_key key;
|
||||
struct scoutfs_inode sinode;
|
||||
struct inode *inode;
|
||||
@@ -1378,16 +1375,16 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
|
||||
if (!inode)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ci = SCOUTFS_I(inode);
|
||||
ci->ino = ino;
|
||||
ci->data_version = 0;
|
||||
ci->online_blocks = 0;
|
||||
ci->offline_blocks = 0;
|
||||
ci->next_readdir_pos = SCOUTFS_DIRENT_FIRST_POS;
|
||||
ci->next_xattr_id = 0;
|
||||
ci->have_item = false;
|
||||
atomic64_set(&ci->last_refreshed, lock->refresh_gen);
|
||||
ci->flags = 0;
|
||||
si = SCOUTFS_I(inode);
|
||||
si->ino = ino;
|
||||
si->data_version = 0;
|
||||
si->online_blocks = 0;
|
||||
si->offline_blocks = 0;
|
||||
si->next_readdir_pos = SCOUTFS_DIRENT_FIRST_POS;
|
||||
si->next_xattr_id = 0;
|
||||
si->have_item = false;
|
||||
atomic64_set(&si->last_refreshed, lock->refresh_gen);
|
||||
si->flags = 0;
|
||||
|
||||
scoutfs_inode_set_meta_seq(inode);
|
||||
scoutfs_inode_set_data_seq(inode);
|
||||
@@ -1421,7 +1418,18 @@ static void init_orphan_key(struct scoutfs_key *key, u64 rid, u64 ino)
|
||||
};
|
||||
}
|
||||
|
||||
static int remove_orphan_item(struct super_block *sb, u64 ino)
|
||||
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_lock *lock = sbi->rid_lock;
|
||||
struct scoutfs_key key;
|
||||
|
||||
init_orphan_key(&key, sbi->rid, ino);
|
||||
|
||||
return scoutfs_item_dirty(sb, &key, lock);
|
||||
}
|
||||
|
||||
int scoutfs_orphan_delete(struct super_block *sb, u64 ino)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_lock *lock = sbi->rid_lock;
|
||||
@@ -1498,8 +1506,7 @@ static int delete_inode_items(struct super_block *sb, u64 ino)
|
||||
retry:
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
prepare_index_deletion(sb, &ind_locks, ino, mode, &sinode) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
|
||||
SIC_DROP_INODE(mode, size));
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -1521,7 +1528,7 @@ retry:
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = remove_orphan_item(sb, ino);
|
||||
ret = scoutfs_orphan_delete(sb, ino);
|
||||
out:
|
||||
if (release)
|
||||
scoutfs_release_trans(sb);
|
||||
@@ -1626,19 +1633,28 @@ int scoutfs_orphan_inode(struct inode *inode)
|
||||
}
|
||||
|
||||
/*
|
||||
* Track an inode that could have dirty pages. Used to kick off writeback
|
||||
* on all dirty pages during transaction commit without tying ourselves in
|
||||
* knots trying to call through the high level vfs sync methods.
|
||||
* Track an inode that could have dirty pages. Used to kick off
|
||||
* writeback on all dirty pages during transaction commit without tying
|
||||
* ourselves in knots trying to call through the high level vfs sync
|
||||
* methods.
|
||||
*
|
||||
* This is called by writers who hold the inode and transaction. The
|
||||
* inode's presence in the rbtree is removed by destroy_inode, prevented
|
||||
* by the inode hold, and by committing the transaction, which is
|
||||
* prevented by holding the transaction. The inode can only go from
|
||||
* empty to on the rbtree while we're here.
|
||||
*/
|
||||
void scoutfs_inode_queue_writeback(struct inode *inode)
|
||||
{
|
||||
DECLARE_INODE_SB_INFO(inode->i_sb, inf);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
|
||||
spin_lock(&inf->writeback_lock);
|
||||
if (RB_EMPTY_NODE(&si->writeback_node))
|
||||
insert_writeback_inode(inf, si);
|
||||
spin_unlock(&inf->writeback_lock);
|
||||
if (RB_EMPTY_NODE(&si->writeback_node)) {
|
||||
spin_lock(&inf->writeback_lock);
|
||||
if (RB_EMPTY_NODE(&si->writeback_node))
|
||||
insert_writeback_inode(inf, si);
|
||||
spin_unlock(&inf->writeback_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
#include "key.h"
|
||||
#include "lock.h"
|
||||
#include "per_task.h"
|
||||
#include "count.h"
|
||||
#include "format.h"
|
||||
#include "data.h"
|
||||
|
||||
@@ -22,6 +21,14 @@ struct scoutfs_inode_info {
|
||||
u64 offline_blocks;
|
||||
u32 flags;
|
||||
|
||||
/*
|
||||
* Protects per-inode extent items, most particularly readers
|
||||
* who want to serialize writers without holding i_mutex. (only
|
||||
* used in data.c, it's the only place that understands file
|
||||
* extent items)
|
||||
*/
|
||||
struct rw_semaphore extent_sem;
|
||||
|
||||
/*
|
||||
* The in-memory item info caches the current index item values
|
||||
* so that we can decide to update them with comparisons instead
|
||||
@@ -75,11 +82,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
|
||||
struct list_head *list, u64 ino,
|
||||
umode_t mode);
|
||||
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
|
||||
struct list_head *list, u64 seq,
|
||||
const struct scoutfs_item_count cnt);
|
||||
struct list_head *list, u64 seq);
|
||||
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
|
||||
bool set_data_seq,
|
||||
const struct scoutfs_item_count cnt);
|
||||
bool set_data_seq);
|
||||
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);
|
||||
|
||||
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
|
||||
@@ -109,6 +114,8 @@ int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
||||
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
|
||||
|
||||
int scoutfs_scan_orphans(struct super_block *sb);
|
||||
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino);
|
||||
int scoutfs_orphan_delete(struct super_block *sb, u64 ino);
|
||||
|
||||
void scoutfs_inode_queue_writeback(struct inode *inode);
|
||||
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);
|
||||
|
||||
104
kmod/src/ioctl.c
104
kmod/src/ioctl.c
@@ -12,6 +12,7 @@
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/uio.h>
|
||||
@@ -274,8 +275,8 @@ static long scoutfs_ioc_release(struct file *file, unsigned long arg)
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_ioctl_release args;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
loff_t start;
|
||||
loff_t end_inc;
|
||||
u64 sblock;
|
||||
u64 eblock;
|
||||
u64 online;
|
||||
u64 offline;
|
||||
u64 isize;
|
||||
@@ -286,9 +287,11 @@ static long scoutfs_ioc_release(struct file *file, unsigned long arg)
|
||||
|
||||
trace_scoutfs_ioc_release(sb, scoutfs_ino(inode), &args);
|
||||
|
||||
if (args.count == 0)
|
||||
if (args.length == 0)
|
||||
return 0;
|
||||
if ((args.block + args.count) < args.block)
|
||||
if (((args.offset + args.length) < args.offset) ||
|
||||
(args.offset & SCOUTFS_BLOCK_SM_MASK) ||
|
||||
(args.length & SCOUTFS_BLOCK_SM_MASK))
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
@@ -321,23 +324,24 @@ static long scoutfs_ioc_release(struct file *file, unsigned long arg)
|
||||
inode_dio_wait(inode);
|
||||
|
||||
/* drop all clean and dirty cached blocks in the range */
|
||||
start = args.block << SCOUTFS_BLOCK_SM_SHIFT;
|
||||
end_inc = ((args.block + args.count) << SCOUTFS_BLOCK_SM_SHIFT) - 1;
|
||||
truncate_inode_pages_range(&inode->i_data, start, end_inc);
|
||||
truncate_inode_pages_range(&inode->i_data, args.offset,
|
||||
args.offset + args.length - 1);
|
||||
|
||||
sblock = args.offset >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
eblock = (args.offset + args.length - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
ret = scoutfs_data_truncate_items(sb, inode, scoutfs_ino(inode),
|
||||
args.block,
|
||||
args.block + args.count - 1, true,
|
||||
sblock,
|
||||
eblock, true,
|
||||
lock);
|
||||
if (ret == 0) {
|
||||
scoutfs_inode_get_onoff(inode, &online, &offline);
|
||||
isize = i_size_read(inode);
|
||||
if (online == 0 && isize) {
|
||||
start = (isize + SCOUTFS_BLOCK_SM_SIZE - 1)
|
||||
sblock = (isize + SCOUTFS_BLOCK_SM_SIZE - 1)
|
||||
>> SCOUTFS_BLOCK_SM_SHIFT;
|
||||
ret = scoutfs_data_truncate_items(sb, inode,
|
||||
scoutfs_ino(inode),
|
||||
start, U64_MAX,
|
||||
sblock, U64_MAX,
|
||||
false, lock);
|
||||
}
|
||||
}
|
||||
@@ -459,23 +463,24 @@ static long scoutfs_ioc_stage(struct file *file, unsigned long arg)
|
||||
|
||||
trace_scoutfs_ioc_stage(sb, scoutfs_ino(inode), &args);
|
||||
|
||||
end_size = args.offset + args.count;
|
||||
end_size = args.offset + args.length;
|
||||
|
||||
/* verify arg constraints that aren't dependent on file */
|
||||
if (args.count < 0 || (end_size < args.offset) ||
|
||||
args.offset & SCOUTFS_BLOCK_SM_MASK)
|
||||
if (args.length < 0 || (end_size < args.offset) ||
|
||||
args.offset & SCOUTFS_BLOCK_SM_MASK) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args.count == 0)
|
||||
if (args.length == 0)
|
||||
return 0;
|
||||
|
||||
/* the iocb is really only used for the file pointer :P */
|
||||
init_sync_kiocb(&kiocb, file);
|
||||
kiocb.ki_pos = args.offset;
|
||||
kiocb.ki_left = args.count;
|
||||
kiocb.ki_nbytes = args.count;
|
||||
kiocb.ki_left = args.length;
|
||||
kiocb.ki_nbytes = args.length;
|
||||
iov.iov_base = (void __user *)(unsigned long)args.buf_ptr;
|
||||
iov.iov_len = args.count;
|
||||
iov.iov_len = args.length;
|
||||
|
||||
ret = mnt_want_write_file(file);
|
||||
if (ret)
|
||||
@@ -514,11 +519,11 @@ static long scoutfs_ioc_stage(struct file *file, unsigned long arg)
|
||||
written = 0;
|
||||
do {
|
||||
ret = generic_file_buffered_write(&kiocb, &iov, 1, pos, &pos,
|
||||
args.count, written);
|
||||
args.length, written);
|
||||
BUG_ON(ret == -EIOCBQUEUED);
|
||||
if (ret > 0)
|
||||
written += ret;
|
||||
} while (ret > 0 && written < args.count);
|
||||
} while (ret > 0 && written < args.length);
|
||||
|
||||
si->staging = false;
|
||||
current->backing_dev_info = NULL;
|
||||
@@ -669,8 +674,7 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)
|
||||
|
||||
/* setting only so we don't see 0 data seq with nonzero data_version */
|
||||
set_data_seq = sm.data_version != 0 ? true : false;
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq,
|
||||
SIC_SETATTR_MORE());
|
||||
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
@@ -933,6 +937,60 @@ static long scoutfs_ioc_alloc_detail(struct file *file, unsigned long arg)
|
||||
args.copied;
|
||||
}
|
||||
|
||||
static long scoutfs_ioc_move_blocks(struct file *file, unsigned long arg)
|
||||
{
|
||||
struct inode *to = file_inode(file);
|
||||
struct super_block *sb = to->i_sb;
|
||||
struct scoutfs_ioctl_move_blocks __user *umb = (void __user *)arg;
|
||||
struct scoutfs_ioctl_move_blocks mb;
|
||||
struct file *from_file;
|
||||
struct inode *from;
|
||||
int ret;
|
||||
|
||||
if (copy_from_user(&mb, umb, sizeof(mb)))
|
||||
return -EFAULT;
|
||||
|
||||
if (mb.len == 0)
|
||||
return 0;
|
||||
|
||||
if (mb.from_off + mb.len < mb.from_off ||
|
||||
mb.to_off + mb.len < mb.to_off)
|
||||
return -EOVERFLOW;
|
||||
|
||||
from_file = fget(mb.from_fd);
|
||||
if (!from_file)
|
||||
return -EBADF;
|
||||
from = file_inode(from_file);
|
||||
|
||||
if (from == to) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (from->i_sb != sb) {
|
||||
ret = -EXDEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mb.flags & SCOUTFS_IOC_MB_UNKNOWN) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = mnt_want_write_file(file);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_data_move_blocks(from, mb.from_off, mb.len,
|
||||
to, mb.to_off, !!(mb.flags & SCOUTFS_IOC_MB_STAGE),
|
||||
mb.data_version);
|
||||
mnt_drop_write_file(file);
|
||||
out:
|
||||
fput(from_file);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
switch (cmd) {
|
||||
@@ -960,6 +1018,8 @@ long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
return scoutfs_ioc_data_wait_err(file, arg);
|
||||
case SCOUTFS_IOC_ALLOC_DETAIL:
|
||||
return scoutfs_ioc_alloc_detail(file, arg);
|
||||
case SCOUTFS_IOC_MOVE_BLOCKS:
|
||||
return scoutfs_ioc_move_blocks(file, arg);
|
||||
}
|
||||
|
||||
return -ENOTTY;
|
||||
|
||||
@@ -163,7 +163,7 @@ struct scoutfs_ioctl_ino_path_result {
|
||||
__u64 dir_pos;
|
||||
__u16 path_bytes;
|
||||
__u8 _pad[6];
|
||||
__u8 path[0];
|
||||
__u8 path[];
|
||||
};
|
||||
|
||||
/* Get a single path from the root to the given inode number */
|
||||
@@ -176,8 +176,8 @@ struct scoutfs_ioctl_ino_path_result {
|
||||
* an offline record is left behind to trigger demand staging if the
|
||||
* file is read.
|
||||
*
|
||||
* The starting block offset and number of blocks to release are in
|
||||
* units 4KB blocks.
|
||||
* The starting file offset and number of bytes to release must be in
|
||||
* multiples of 4KB.
|
||||
*
|
||||
* The specified range can extend past i_size and can straddle sparse
|
||||
* regions or blocks that are already offline. The only change it makes
|
||||
@@ -193,8 +193,8 @@ struct scoutfs_ioctl_ino_path_result {
|
||||
* presentation of the data in the file.
|
||||
*/
|
||||
struct scoutfs_ioctl_release {
|
||||
__u64 block;
|
||||
__u64 count;
|
||||
__u64 offset;
|
||||
__u64 length;
|
||||
__u64 data_version;
|
||||
};
|
||||
|
||||
@@ -205,7 +205,7 @@ struct scoutfs_ioctl_stage {
|
||||
__u64 data_version;
|
||||
__u64 buf_ptr;
|
||||
__u64 offset;
|
||||
__s32 count;
|
||||
__s32 length;
|
||||
__u32 _pad;
|
||||
};
|
||||
|
||||
@@ -259,7 +259,7 @@ struct scoutfs_ioctl_data_waiting {
|
||||
__u8 _pad[6];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U8_MAX << 0)
|
||||
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)
|
||||
|
||||
#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
|
||||
struct scoutfs_ioctl_data_waiting)
|
||||
@@ -279,7 +279,7 @@ struct scoutfs_ioctl_setattr_more {
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U8_MAX << 1)
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U64_MAX << 1)
|
||||
|
||||
#define SCOUTFS_IOC_SETATTR_MORE _IOW(SCOUTFS_IOCTL_MAGIC, 7, \
|
||||
struct scoutfs_ioctl_setattr_more)
|
||||
@@ -395,9 +395,6 @@ struct scoutfs_ioctl_data_wait_err {
|
||||
struct scoutfs_ioctl_data_wait_err)
|
||||
|
||||
|
||||
#define SCOUTFS_IOC_ALLOC_DETAIL _IOR(SCOUTFS_IOCTL_MAGIC, 12, \
|
||||
struct scoutfs_ioctl_alloc_detail)
|
||||
|
||||
struct scoutfs_ioctl_alloc_detail {
|
||||
__u64 entries_ptr;
|
||||
__u64 entries_nr;
|
||||
@@ -413,4 +410,70 @@ struct scoutfs_ioctl_alloc_detail_entry {
|
||||
__u8 __pad[6];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_ALLOC_DETAIL _IOR(SCOUTFS_IOCTL_MAGIC, 12, \
|
||||
struct scoutfs_ioctl_alloc_detail)
|
||||
|
||||
/*
|
||||
* Move extents from one regular file to another at a different offset,
|
||||
* on the same file system.
|
||||
*
|
||||
* from_fd specifies the source file and the ioctl is called on the
|
||||
* destination file. Both files must have write access. from_off specifies
|
||||
* the byte offset in the source, to_off is the byte offset in the
|
||||
* destination, and len is the number of bytes in the region to move. All of
|
||||
* the offsets and lengths must be in multiples of 4KB, except in the case
|
||||
* where the from_off + len ends at the i_size of the source
|
||||
* file. data_version is only used when STAGE flag is set (see below). flags
|
||||
* field is currently only used to optionally specify STAGE behavior.
|
||||
*
|
||||
* This interface only moves extents which are block granular, it does
|
||||
* not perform RMW of sub-block byte extents and it does not overwrite
|
||||
* existing extents in the destination. It will split extents in the
|
||||
* source.
|
||||
*
|
||||
* Only extents within i_size on the source are moved. The destination
|
||||
* i_size will be updated if extents are moved beyond its current
|
||||
* i_size. The i_size update will maintain final partial blocks in the
|
||||
* source.
|
||||
*
|
||||
* If STAGE flag is not set, it will return an error if either of the files
|
||||
* have offline extents. It will return 0 when all of the extents in the
|
||||
* source region have been moved to the destination. Moving extents updates
|
||||
* the ctime, mtime, meta_seq, data_seq, and data_version fields of both the
|
||||
* source and destination inodes. If an error is returned then partial
|
||||
* progress may have been made and inode fields may have been updated.
|
||||
*
|
||||
* If STAGE flag is set, as above except destination range must be in an
|
||||
* offline extent. Fields are updated only for source inode.
|
||||
*
|
||||
* Errors specific to this interface include:
|
||||
*
|
||||
* EINVAL: from_off, len, or to_off aren't a multiple of 4KB; the source
|
||||
* and destination files are the same inode; either the source or
|
||||
* destination is not a regular file; the destination file has
|
||||
* an existing overlapping extent (if STAGE flag not set); the
|
||||
* destination range is not in an offline extent (if STAGE set).
|
||||
* EOVERFLOW: either from_off + len or to_off + len exceeded 64bits.
|
||||
* EBADF: from_fd isn't a valid open file descriptor.
|
||||
* EXDEV: the source and destination files are in different filesystems.
|
||||
* EISDIR: either the source or destination is a directory.
|
||||
* ENODATA: either the source or destination file have offline extents and
|
||||
* STAGE flag is not set.
|
||||
* ESTALE: data_version does not match destination data_version.
|
||||
*/
|
||||
#define SCOUTFS_IOC_MB_STAGE (1 << 0)
|
||||
#define SCOUTFS_IOC_MB_UNKNOWN (U64_MAX << 1)
|
||||
|
||||
struct scoutfs_ioctl_move_blocks {
|
||||
__u64 from_fd;
|
||||
__u64 from_off;
|
||||
__u64 len;
|
||||
__u64 to_off;
|
||||
__u64 data_version;
|
||||
__u64 flags;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \
|
||||
struct scoutfs_ioctl_move_blocks)
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1339,7 +1339,10 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
|
||||
/* split needs multiple items, sparse may not have enough */
|
||||
if (!left)
|
||||
return -ENOMEM;
|
||||
|
||||
compact_page_items(sb, pg, left);
|
||||
found = item_rbtree_walk(&pg->item_root, key, NULL, &par,
|
||||
&pnode);
|
||||
}
|
||||
|
||||
item = alloc_item(pg, key, liv, val, val_len);
|
||||
@@ -1491,6 +1494,8 @@ retry:
|
||||
rbtree_erase(&rd->node, &root);
|
||||
rbtree_insert(&rd->node, par, pnode, &cinf->pg_root);
|
||||
lru_accessed(sb, cinf, rd);
|
||||
trace_scoutfs_item_read_page(sb, key, &rd->start,
|
||||
&rd->end);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -2342,6 +2347,8 @@ retry:
|
||||
write_lock(&pg->rwlock);
|
||||
|
||||
pgi = trim_page_intersection(sb, cinf, pg, right, start, end);
|
||||
trace_scoutfs_item_invalidate_page(sb, start, end,
|
||||
&pg->start, &pg->end, pgi);
|
||||
BUG_ON(pgi == PGI_DISJOINT); /* walk wouldn't ret disjoint */
|
||||
|
||||
if (pgi == PGI_INSIDE) {
|
||||
@@ -2364,9 +2371,9 @@ retry:
|
||||
/* inv was entirely inside page, done after bisect */
|
||||
write_trylock_will_succeed(&right->rwlock);
|
||||
rbtree_insert(&right->node, par, pnode, &cinf->pg_root);
|
||||
lru_accessed(sb, cinf, right);
|
||||
write_unlock(&right->rwlock);
|
||||
write_unlock(&pg->rwlock);
|
||||
lru_accessed(sb, cinf, right);
|
||||
right = NULL;
|
||||
break;
|
||||
}
|
||||
@@ -2396,7 +2403,6 @@ static int item_lru_shrink(struct shrinker *shrink,
|
||||
struct active_reader *active;
|
||||
struct cached_page *tmp;
|
||||
struct cached_page *pg;
|
||||
LIST_HEAD(list);
|
||||
int nr;
|
||||
|
||||
if (sc->nr_to_scan == 0)
|
||||
@@ -2433,21 +2439,17 @@ static int item_lru_shrink(struct shrinker *shrink,
|
||||
|
||||
__lru_remove(sb, cinf, pg);
|
||||
rbtree_erase(&pg->node, &cinf->pg_root);
|
||||
list_move_tail(&pg->lru_head, &list);
|
||||
invalidate_pcpu_page(pg);
|
||||
write_unlock(&pg->rwlock);
|
||||
|
||||
put_pg(sb, pg);
|
||||
|
||||
if (--nr == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
write_unlock(&cinf->rwlock);
|
||||
spin_unlock(&cinf->lru_lock);
|
||||
|
||||
list_for_each_entry_safe(pg, tmp, &list, lru_head) {
|
||||
list_del_init(&pg->lru_head);
|
||||
put_pg(sb, pg);
|
||||
}
|
||||
out:
|
||||
return min_t(unsigned long, cinf->lru_pages, INT_MAX);
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@
|
||||
* relative to that lock state we resend.
|
||||
*/
|
||||
|
||||
#define GRACE_PERIOD_KT ms_to_ktime(2)
|
||||
#define GRACE_PERIOD_KT ms_to_ktime(10)
|
||||
|
||||
/*
|
||||
* allocated per-super, freed on unmount.
|
||||
@@ -638,7 +638,6 @@ static void lock_grant_worker(struct work_struct *work)
|
||||
struct lock_info *linfo = container_of(work, struct lock_info,
|
||||
grant_work);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_net_lock_grant_response *gr;
|
||||
struct scoutfs_net_lock *nl;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
@@ -648,8 +647,7 @@ static void lock_grant_worker(struct work_struct *work)
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
list_for_each_entry_safe(lock, tmp, &linfo->grant_list, grant_head) {
|
||||
gr = &lock->grant_resp;
|
||||
nl = &lock->grant_resp.nl;
|
||||
nl = &lock->grant_nl;
|
||||
|
||||
/* wait for reordered invalidation to finish */
|
||||
if (lock->mode != nl->old_mode)
|
||||
@@ -667,7 +665,6 @@ static void lock_grant_worker(struct work_struct *work)
|
||||
lock->request_pending = 0;
|
||||
lock->mode = nl->new_mode;
|
||||
lock->write_version = le64_to_cpu(nl->write_version);
|
||||
lock->roots = gr->roots;
|
||||
|
||||
if (lock_count_match_exists(nl->new_mode, lock->waiters))
|
||||
extend_grace(sb, lock);
|
||||
@@ -689,9 +686,8 @@ static void lock_grant_worker(struct work_struct *work)
|
||||
* work to process.
|
||||
*/
|
||||
int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
struct scoutfs_net_lock_grant_response *gr)
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
struct scoutfs_net_lock *nl = &gr->nl;
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
|
||||
@@ -705,7 +701,7 @@ int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
trace_scoutfs_lock_grant_response(sb, lock);
|
||||
BUG_ON(!lock->request_pending);
|
||||
|
||||
lock->grant_resp = *gr;
|
||||
lock->grant_nl = *nl;
|
||||
list_add_tail(&lock->grant_head, &linfo->grant_list);
|
||||
queue_grant_work(linfo);
|
||||
|
||||
@@ -770,16 +766,6 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
list_for_each_entry_safe(lock, tmp, &linfo->inv_list, inv_head) {
|
||||
nl = &lock->inv_nl;
|
||||
|
||||
/* skip if grace hasn't elapsed, record earliest */
|
||||
deadline = lock->grace_deadline;
|
||||
if (ktime_before(now, deadline)) {
|
||||
delay = min(delay,
|
||||
nsecs_to_jiffies(ktime_to_ns(
|
||||
ktime_sub(deadline, now))));
|
||||
scoutfs_inc_counter(linfo->sb, lock_grace_wait);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* wait for reordered grant to finish */
|
||||
if (lock->mode != nl->old_mode)
|
||||
continue;
|
||||
@@ -788,6 +774,15 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
if (!lock_counts_match(nl->new_mode, lock->users))
|
||||
continue;
|
||||
|
||||
/* skip if grace hasn't elapsed, record earliest */
|
||||
deadline = lock->grace_deadline;
|
||||
if (!linfo->shutdown && ktime_before(now, deadline)) {
|
||||
delay = min(delay,
|
||||
nsecs_to_jiffies(ktime_to_ns(
|
||||
ktime_sub(deadline, now))));
|
||||
scoutfs_inc_counter(linfo->sb, lock_grace_wait);
|
||||
continue;
|
||||
}
|
||||
/* set the new mode, no incompatible users during inval */
|
||||
lock->mode = nl->new_mode;
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ struct scoutfs_lock {
|
||||
u64 refresh_gen;
|
||||
u64 write_version;
|
||||
u64 dirty_trans_seq;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct list_head lru_head;
|
||||
wait_queue_head_t waitq;
|
||||
ktime_t grace_deadline;
|
||||
@@ -31,7 +30,7 @@ struct scoutfs_lock {
|
||||
invalidate_pending:1;
|
||||
|
||||
struct list_head grant_head;
|
||||
struct scoutfs_net_lock_grant_response grant_resp;
|
||||
struct scoutfs_net_lock grant_nl;
|
||||
struct list_head inv_head;
|
||||
struct scoutfs_net_lock inv_nl;
|
||||
u64 inv_net_id;
|
||||
@@ -57,7 +56,7 @@ struct scoutfs_lock_coverage {
|
||||
};
|
||||
|
||||
int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
struct scoutfs_net_lock_grant_response *gr);
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
|
||||
@@ -484,7 +484,6 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
struct server_lock_node *snode)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_net_lock_grant_response gres;
|
||||
struct scoutfs_net_lock nl;
|
||||
struct client_lock_entry *req;
|
||||
struct client_lock_entry *req_tmp;
|
||||
@@ -547,11 +546,8 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
nl.write_version = cpu_to_le64(wv);
|
||||
}
|
||||
|
||||
gres.nl = nl;
|
||||
scoutfs_server_get_roots(sb, &gres.roots);
|
||||
|
||||
ret = scoutfs_server_lock_response(sb, req->rid,
|
||||
req->net_id, &gres);
|
||||
req->net_id, &nl);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -586,7 +582,9 @@ static void init_lock_clients_key(struct scoutfs_key *key, u64 rid)
|
||||
* the client had already talked to the server then we must find an
|
||||
* existing record for it and should begin recovery. If it doesn't have
|
||||
* a record then its timed out and we can't allow it to reconnect. If
|
||||
* its connecting for the first time then we insert a new record. If
|
||||
* we're creating a new record for a client we can see EEXIST if the
|
||||
* greeting is resent to a new server after the record was committed but
|
||||
* before the response was received by the client.
|
||||
*
|
||||
* This is running in concurrent client greeting processing contexts.
|
||||
*/
|
||||
@@ -611,6 +609,8 @@ int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
|
||||
ret = scoutfs_btree_insert(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients,
|
||||
&key, NULL, 0);
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&inf->mutex);
|
||||
|
||||
|
||||
@@ -944,7 +944,6 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
|
||||
struct scoutfs_net_connection *acc_conn;
|
||||
DECLARE_WAIT_QUEUE_HEAD(waitq);
|
||||
struct socket *acc_sock;
|
||||
LIST_HEAD(conn_list);
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_listen_work_enter(sb, 0, 0);
|
||||
@@ -1546,9 +1545,8 @@ void scoutfs_net_client_greeting(struct super_block *sb,
|
||||
* response and they can disconnect cleanly.
|
||||
*
|
||||
* At this point our connection is idle except for send submissions and
|
||||
* shutdown being queued. Once we shut down a We completely own a We
|
||||
* have exclusive access to a previous conn once its shutdown and we set
|
||||
* _freeing.
|
||||
* shutdown being queued. We have exclusive access to the previous conn
|
||||
* once it's shutdown and we set _freeing.
|
||||
*/
|
||||
void scoutfs_net_server_greeting(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
|
||||
@@ -90,19 +90,13 @@ enum conn_flags {
|
||||
#define SIN_ARG(sin) sin, be16_to_cpu((sin)->sin_port)
|
||||
|
||||
static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
|
||||
struct scoutfs_inet_addr *addr)
|
||||
union scoutfs_inet_addr *addr)
|
||||
{
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->addr));
|
||||
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->port));
|
||||
}
|
||||
BUG_ON(addr->v4.family != cpu_to_le16(SCOUTFS_AF_IPV4));
|
||||
|
||||
static inline void scoutfs_addr_from_sin(struct scoutfs_inet_addr *addr,
|
||||
struct sockaddr_in *sin)
|
||||
{
|
||||
addr->addr = be32_to_le32(sin->sin_addr.s_addr);
|
||||
addr->port = be16_to_le16(sin->sin_port);
|
||||
memset(addr->__pad, 0, sizeof(addr->__pad));
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
|
||||
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
|
||||
}
|
||||
|
||||
struct scoutfs_net_connection *
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
#include "super.h"
|
||||
|
||||
static const match_table_t tokens = {
|
||||
{Opt_server_addr, "server_addr=%s"},
|
||||
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
|
||||
{Opt_metadev_path, "metadev_path=%s"},
|
||||
{Opt_err, NULL}
|
||||
};
|
||||
@@ -43,46 +43,6 @@ u32 scoutfs_option_u32(struct super_block *sb, int token)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The caller's string is null terminted and can be clobbered */
|
||||
static int parse_ipv4(struct super_block *sb, char *str,
|
||||
struct sockaddr_in *sin)
|
||||
{
|
||||
unsigned long port = 0;
|
||||
__be32 addr;
|
||||
char *c;
|
||||
int ret;
|
||||
|
||||
/* null term port, if specified */
|
||||
c = strchr(str, ':');
|
||||
if (c)
|
||||
*c = '\0';
|
||||
|
||||
/* parse addr */
|
||||
addr = in_aton(str);
|
||||
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
|
||||
ipv4_is_zeronet(addr) ||
|
||||
ipv4_is_local_multicast(addr)) {
|
||||
scoutfs_err(sb, "invalid unicast ipv4 address: %s", str);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* parse port, if specified */
|
||||
if (c) {
|
||||
c++;
|
||||
ret = kstrtoul(c, 0, &port);
|
||||
if (ret != 0 || port == 0 || port >= U16_MAX) {
|
||||
scoutfs_err(sb, "invalid port in ipv4 address: %s", c);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_addr.s_addr = addr;
|
||||
sin->sin_port = cpu_to_be16(port);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_bdev_path(struct super_block *sb, substring_t *substr,
|
||||
char **bdev_path_ret)
|
||||
{
|
||||
@@ -132,14 +92,15 @@ out:
|
||||
int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
struct mount_options *parsed)
|
||||
{
|
||||
char ipstr[INET_ADDRSTRLEN + 1];
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
int nr;
|
||||
int token;
|
||||
char *p;
|
||||
int ret;
|
||||
|
||||
/* Set defaults */
|
||||
memset(parsed, 0, sizeof(*parsed));
|
||||
parsed->quorum_slot_nr = -1;
|
||||
|
||||
while ((p = strsep(&options, ",")) != NULL) {
|
||||
if (!*p)
|
||||
@@ -147,12 +108,23 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
|
||||
token = match_token(p, tokens, args);
|
||||
switch (token) {
|
||||
case Opt_server_addr:
|
||||
case Opt_quorum_slot_nr:
|
||||
|
||||
match_strlcpy(ipstr, args, ARRAY_SIZE(ipstr));
|
||||
ret = parse_ipv4(sb, ipstr, &parsed->server_addr);
|
||||
if (ret < 0)
|
||||
if (parsed->quorum_slot_nr != -1) {
|
||||
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 ||
|
||||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
|
||||
SCOUTFS_QUORUM_MAX_SLOTS - 1);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
parsed->quorum_slot_nr = nr;
|
||||
break;
|
||||
case Opt_metadev_path:
|
||||
|
||||
|
||||
@@ -6,13 +6,13 @@
|
||||
#include "format.h"
|
||||
|
||||
enum scoutfs_mount_options {
|
||||
Opt_server_addr,
|
||||
Opt_quorum_slot_nr,
|
||||
Opt_metadev_path,
|
||||
Opt_err,
|
||||
};
|
||||
|
||||
struct mount_options {
|
||||
struct sockaddr_in server_addr;
|
||||
int quorum_slot_nr;
|
||||
char *metadev_path;
|
||||
};
|
||||
|
||||
|
||||
1602
kmod/src/quorum.c
1602
kmod/src/quorum.c
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,15 @@
|
||||
#ifndef _SCOUTFS_QUORUM_H_
|
||||
#define _SCOUTFS_QUORUM_H_
|
||||
|
||||
int scoutfs_quorum_election(struct super_block *sb, ktime_t timeout_abs,
|
||||
u64 prev_term, u64 *elected_term);
|
||||
void scoutfs_quorum_clear_leader(struct super_block *sb);
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
|
||||
void scoutfs_quorum_server_shutdown(struct super_block *sb);
|
||||
|
||||
u8 scoutfs_quorum_votes_needed(struct super_block *sb);
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
|
||||
struct sockaddr_in *sin);
|
||||
|
||||
int scoutfs_quorum_setup(struct super_block *sb);
|
||||
void scoutfs_quorum_shutdown(struct super_block *sb);
|
||||
void scoutfs_quorum_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -31,7 +31,6 @@
|
||||
#include "lock.h"
|
||||
#include "super.h"
|
||||
#include "ioctl.h"
|
||||
#include "count.h"
|
||||
#include "export.h"
|
||||
#include "dir.h"
|
||||
#include "server.h"
|
||||
@@ -169,6 +168,40 @@ TRACE_EVENT(scoutfs_data_fallocate,
|
||||
__entry->len, __entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_data_move_blocks,
|
||||
TP_PROTO(struct super_block *sb, u64 from_ino, u64 from_start, u64 len,
|
||||
u64 map, u8 flags, u64 to_ino, u64 to_start),
|
||||
|
||||
TP_ARGS(sb, from_ino, from_start, len, map, flags, to_ino, to_start),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, from_ino)
|
||||
__field(__u64, from_start)
|
||||
__field(__u64, len)
|
||||
__field(__u64, map)
|
||||
__field(__u8, flags)
|
||||
__field(__u64, to_ino)
|
||||
__field(__u64, to_start)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->from_ino = from_ino;
|
||||
__entry->from_start = from_start;
|
||||
__entry->len = len;
|
||||
__entry->map = map;
|
||||
__entry->flags = flags;
|
||||
__entry->to_ino = to_ino;
|
||||
__entry->to_start = to_start;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" from_ino %llu from_start %llu len %llu map %llu flags 0x%x to_ino %llu to_start %llu\n",
|
||||
SCSB_TRACE_ARGS, __entry->from_ino, __entry->from_start,
|
||||
__entry->len, __entry->map, __entry->flags, __entry->to_ino,
|
||||
__entry->to_start)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_data_fiemap,
|
||||
TP_PROTO(struct super_block *sb, __u64 start, __u64 len, int ret),
|
||||
|
||||
@@ -390,135 +423,34 @@ TRACE_EVENT(scoutfs_trans_write_func,
|
||||
TP_printk(SCSBF" dirty %lu", SCSB_TRACE_ARGS, __entry->dirty)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_release_trans,
|
||||
TP_PROTO(struct super_block *sb, void *rsv, unsigned int rsv_holders,
|
||||
struct scoutfs_item_count *res,
|
||||
struct scoutfs_item_count *act, unsigned int tri_holders,
|
||||
unsigned int tri_writing, unsigned int tri_items,
|
||||
unsigned int tri_vals),
|
||||
DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
|
||||
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
|
||||
|
||||
TP_ARGS(sb, rsv, rsv_holders, res, act, tri_holders, tri_writing,
|
||||
tri_items, tri_vals),
|
||||
TP_ARGS(sb, journal_info, holders),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, rsv)
|
||||
__field(unsigned int, rsv_holders)
|
||||
__field(int, res_items)
|
||||
__field(int, res_vals)
|
||||
__field(int, act_items)
|
||||
__field(int, act_vals)
|
||||
__field(unsigned int, tri_holders)
|
||||
__field(unsigned int, tri_writing)
|
||||
__field(unsigned int, tri_items)
|
||||
__field(unsigned int, tri_vals)
|
||||
__field(unsigned long, journal_info)
|
||||
__field(int, holders)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->rsv = rsv;
|
||||
__entry->rsv_holders = rsv_holders;
|
||||
__entry->res_items = res->items;
|
||||
__entry->res_vals = res->vals;
|
||||
__entry->act_items = act->items;
|
||||
__entry->act_vals = act->vals;
|
||||
__entry->tri_holders = tri_holders;
|
||||
__entry->tri_writing = tri_writing;
|
||||
__entry->tri_items = tri_items;
|
||||
__entry->tri_vals = tri_vals;
|
||||
__entry->journal_info = (unsigned long)journal_info;
|
||||
__entry->holders = holders;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rsv %p holders %u reserved %u.%u actual "
|
||||
"%d.%d, trans holders %u writing %u reserved "
|
||||
"%u.%u", SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
|
||||
__entry->res_items, __entry->res_vals, __entry->act_items,
|
||||
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
|
||||
__entry->tri_items, __entry->tri_vals)
|
||||
TP_printk(SCSBF" journal_info 0x%0lx holders %d",
|
||||
SCSB_TRACE_ARGS, __entry->journal_info, __entry->holders)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trans_acquired_hold,
|
||||
TP_PROTO(struct super_block *sb, const struct scoutfs_item_count *cnt,
|
||||
void *rsv, unsigned int rsv_holders,
|
||||
struct scoutfs_item_count *res,
|
||||
struct scoutfs_item_count *act, unsigned int tri_holders,
|
||||
unsigned int tri_writing, unsigned int tri_items,
|
||||
unsigned int tri_vals),
|
||||
|
||||
TP_ARGS(sb, cnt, rsv, rsv_holders, res, act, tri_holders, tri_writing,
|
||||
tri_items, tri_vals),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(int, cnt_items)
|
||||
__field(int, cnt_vals)
|
||||
__field(void *, rsv)
|
||||
__field(unsigned int, rsv_holders)
|
||||
__field(int, res_items)
|
||||
__field(int, res_vals)
|
||||
__field(int, act_items)
|
||||
__field(int, act_vals)
|
||||
__field(unsigned int, tri_holders)
|
||||
__field(unsigned int, tri_writing)
|
||||
__field(unsigned int, tri_items)
|
||||
__field(unsigned int, tri_vals)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->cnt_items = cnt->items;
|
||||
__entry->cnt_vals = cnt->vals;
|
||||
__entry->rsv = rsv;
|
||||
__entry->rsv_holders = rsv_holders;
|
||||
__entry->res_items = res->items;
|
||||
__entry->res_vals = res->vals;
|
||||
__entry->act_items = act->items;
|
||||
__entry->act_vals = act->vals;
|
||||
__entry->tri_holders = tri_holders;
|
||||
__entry->tri_writing = tri_writing;
|
||||
__entry->tri_items = tri_items;
|
||||
__entry->tri_vals = tri_vals;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" cnt %u.%u, rsv %p holders %u reserved %u.%u "
|
||||
"actual %d.%d, trans holders %u writing %u reserved "
|
||||
"%u.%u", SCSB_TRACE_ARGS, __entry->cnt_items,
|
||||
__entry->cnt_vals, __entry->rsv, __entry->rsv_holders,
|
||||
__entry->res_items, __entry->res_vals, __entry->act_items,
|
||||
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
|
||||
__entry->tri_items, __entry->tri_vals)
|
||||
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_trans_acquired_hold,
|
||||
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
|
||||
TP_ARGS(sb, journal_info, holders)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trans_track_item,
|
||||
TP_PROTO(struct super_block *sb, int delta_items, int delta_vals,
|
||||
int act_items, int act_vals, int res_items, int res_vals),
|
||||
|
||||
TP_ARGS(sb, delta_items, delta_vals, act_items, act_vals, res_items,
|
||||
res_vals),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(int, delta_items)
|
||||
__field(int, delta_vals)
|
||||
__field(int, act_items)
|
||||
__field(int, act_vals)
|
||||
__field(int, res_items)
|
||||
__field(int, res_vals)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->delta_items = delta_items;
|
||||
__entry->delta_vals = delta_vals;
|
||||
__entry->act_items = act_items;
|
||||
__entry->act_vals = act_vals;
|
||||
__entry->res_items = res_items;
|
||||
__entry->res_vals = res_vals;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" delta_items %d delta_vals %d act_items %d act_vals %d res_items %d res_vals %d",
|
||||
SCSB_TRACE_ARGS, __entry->delta_items, __entry->delta_vals,
|
||||
__entry->act_items, __entry->act_vals, __entry->res_items,
|
||||
__entry->res_vals)
|
||||
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_release_trans,
|
||||
TP_PROTO(struct super_block *sb, void *journal_info, int holders),
|
||||
TP_ARGS(sb, journal_info, holders)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_ioc_release,
|
||||
@@ -530,22 +462,22 @@ TRACE_EVENT(scoutfs_ioc_release,
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(__u64, block)
|
||||
__field(__u64, count)
|
||||
__field(__u64, offset)
|
||||
__field(__u64, length)
|
||||
__field(__u64, vers)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->block = args->block;
|
||||
__entry->count = args->count;
|
||||
__entry->offset = args->offset;
|
||||
__entry->length = args->length;
|
||||
__entry->vers = args->data_version;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu block %llu count %llu vers %llu",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->block,
|
||||
__entry->count, __entry->vers)
|
||||
TP_printk(SCSBF" ino %llu offset %llu length %llu vers %llu",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->offset,
|
||||
__entry->length, __entry->vers)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_ino_ret_class, scoutfs_ioc_release_ret,
|
||||
@@ -564,7 +496,7 @@ TRACE_EVENT(scoutfs_ioc_stage,
|
||||
__field(__u64, ino)
|
||||
__field(__u64, vers)
|
||||
__field(__u64, offset)
|
||||
__field(__s32, count)
|
||||
__field(__s32, length)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
@@ -572,12 +504,12 @@ TRACE_EVENT(scoutfs_ioc_stage,
|
||||
__entry->ino = ino;
|
||||
__entry->vers = args->data_version;
|
||||
__entry->offset = args->offset;
|
||||
__entry->count = args->count;
|
||||
__entry->length = args->length;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu vers %llu offset %llu count %d",
|
||||
TP_printk(SCSBF" ino %llu vers %llu offset %llu length %d",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->vers,
|
||||
__entry->offset, __entry->count)
|
||||
__entry->offset, __entry->length)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_ioc_data_wait_err,
|
||||
@@ -1652,7 +1584,7 @@ TRACE_EVENT(scoutfs_get_name,
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_btree_read_error,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *ref),
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *ref),
|
||||
|
||||
TP_ARGS(sb, ref),
|
||||
|
||||
@@ -1672,37 +1604,10 @@ TRACE_EVENT(scoutfs_btree_read_error,
|
||||
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_btree_dirty_block,
|
||||
TP_PROTO(struct super_block *sb, u64 blkno, u64 seq,
|
||||
u64 bt_blkno, u64 bt_seq),
|
||||
|
||||
TP_ARGS(sb, blkno, seq, bt_blkno, bt_seq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, blkno)
|
||||
__field(__u64, seq)
|
||||
__field(__u64, bt_blkno)
|
||||
__field(__u64, bt_seq)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->blkno = blkno;
|
||||
__entry->seq = seq;
|
||||
__entry->bt_blkno = bt_blkno;
|
||||
__entry->bt_seq = bt_seq;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" blkno %llu seq %llu bt_blkno %llu bt_seq %llu",
|
||||
SCSB_TRACE_ARGS, __entry->blkno, __entry->seq,
|
||||
__entry->bt_blkno, __entry->bt_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_btree_walk,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key, int flags, int level,
|
||||
struct scoutfs_btree_ref *ref),
|
||||
struct scoutfs_block_ref *ref),
|
||||
|
||||
TP_ARGS(sb, root, key, flags, level, ref),
|
||||
|
||||
@@ -1838,118 +1743,69 @@ TRACE_EVENT(scoutfs_lock_message,
|
||||
__entry->old_mode, __entry->new_mode)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_quorum_message_class,
|
||||
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
|
||||
|
||||
TRACE_EVENT(scoutfs_quorum_election,
|
||||
TP_PROTO(struct super_block *sb, u64 prev_term),
|
||||
|
||||
TP_ARGS(sb, prev_term),
|
||||
TP_ARGS(sb, term, type, nr),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, prev_term)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->prev_term = prev_term;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" prev_term %llu",
|
||||
SCSB_TRACE_ARGS, __entry->prev_term)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_quorum_election_ret,
|
||||
TP_PROTO(struct super_block *sb, int ret, u64 elected_term),
|
||||
|
||||
TP_ARGS(sb, ret, elected_term),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(int, ret)
|
||||
__field(__u64, elected_term)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ret = ret;
|
||||
__entry->elected_term = elected_term;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ret %d elected_term %llu",
|
||||
SCSB_TRACE_ARGS, __entry->ret, __entry->elected_term)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_quorum_election_vote,
|
||||
TP_PROTO(struct super_block *sb, int role, u64 term, u64 vote_for_rid,
|
||||
int votes, int log_cycles, int quorum_count),
|
||||
|
||||
TP_ARGS(sb, role, term, vote_for_rid, votes, log_cycles, quorum_count),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(int, role)
|
||||
__field(__u64, term)
|
||||
__field(__u64, vote_for_rid)
|
||||
__field(int, votes)
|
||||
__field(int, log_cycles)
|
||||
__field(int, quorum_count)
|
||||
__field(__u8, type)
|
||||
__field(int, nr)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->role = role;
|
||||
__entry->term = term;
|
||||
__entry->vote_for_rid = vote_for_rid;
|
||||
__entry->votes = votes;
|
||||
__entry->log_cycles = log_cycles;
|
||||
__entry->quorum_count = quorum_count;
|
||||
__entry->type = type;
|
||||
__entry->nr = nr;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" role %d term %llu vote_for_rid %016llx votes %d log_cycles %d quorum_count %d",
|
||||
SCSB_TRACE_ARGS, __entry->role, __entry->term,
|
||||
__entry->vote_for_rid, __entry->votes, __entry->log_cycles,
|
||||
__entry->quorum_count)
|
||||
TP_printk(SCSBF" term %llu type %u nr %d",
|
||||
SCSB_TRACE_ARGS, __entry->term, __entry->type, __entry->nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_send_message,
|
||||
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
|
||||
TP_ARGS(sb, term, type, nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_recv_message,
|
||||
TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
|
||||
TP_ARGS(sb, term, type, nr)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_quorum_block_class,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
|
||||
TRACE_EVENT(scoutfs_quorum_loop,
|
||||
TP_PROTO(struct super_block *sb, int role, u64 term, int vote_for,
|
||||
unsigned long vote_bits, struct timespec64 timeout),
|
||||
|
||||
TP_ARGS(sb, blk),
|
||||
TP_ARGS(sb, role, term, vote_for, vote_bits, timeout),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, blkno)
|
||||
__field(__u64, term)
|
||||
__field(__u64, write_nr)
|
||||
__field(__u64, voter_rid)
|
||||
__field(__u64, vote_for_rid)
|
||||
__field(__u32, crc)
|
||||
__field(__u8, log_nr)
|
||||
__field(int, role)
|
||||
__field(int, vote_for)
|
||||
__field(unsigned long, vote_bits)
|
||||
__field(unsigned long, vote_count)
|
||||
__field(unsigned long long, timeout_sec)
|
||||
__field(int, timeout_nsec)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->blkno = le64_to_cpu(blk->blkno);
|
||||
__entry->term = le64_to_cpu(blk->term);
|
||||
__entry->write_nr = le64_to_cpu(blk->write_nr);
|
||||
__entry->voter_rid = le64_to_cpu(blk->voter_rid);
|
||||
__entry->vote_for_rid = le64_to_cpu(blk->vote_for_rid);
|
||||
__entry->crc = le32_to_cpu(blk->crc);
|
||||
__entry->log_nr = blk->log_nr;
|
||||
__entry->term = term;
|
||||
__entry->role = role;
|
||||
__entry->vote_for = vote_for;
|
||||
__entry->vote_bits = vote_bits;
|
||||
__entry->vote_count = hweight_long(vote_bits);
|
||||
__entry->timeout_sec = timeout.tv_sec;
|
||||
__entry->timeout_nsec = timeout.tv_nsec;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" blkno %llu term %llu write_nr %llu voter_rid %016llx vote_for_rid %016llx crc 0x%08x log_nr %u",
|
||||
SCSB_TRACE_ARGS, __entry->blkno, __entry->term,
|
||||
__entry->write_nr, __entry->voter_rid, __entry->vote_for_rid,
|
||||
__entry->crc, __entry->log_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_read_block,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
|
||||
TP_ARGS(sb, blk)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_write_block,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
|
||||
TP_ARGS(sb, blk)
|
||||
TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu.%u",
|
||||
SCSB_TRACE_ARGS, __entry->term, __entry->role,
|
||||
__entry->vote_for, __entry->vote_bits, __entry->vote_count,
|
||||
__entry->timeout_sec, __entry->timeout_nsec)
|
||||
);
|
||||
|
||||
/*
|
||||
@@ -1979,31 +1835,27 @@ DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_recv_clock_sync,
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trans_seq_advance,
|
||||
TP_PROTO(struct super_block *sb, u64 rid, u64 prev_seq,
|
||||
u64 next_seq),
|
||||
TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),
|
||||
|
||||
TP_ARGS(sb, rid, prev_seq, next_seq),
|
||||
TP_ARGS(sb, rid, trans_seq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, s_rid)
|
||||
__field(__u64, prev_seq)
|
||||
__field(__u64, next_seq)
|
||||
__field(__u64, trans_seq)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->s_rid = rid;
|
||||
__entry->prev_seq = prev_seq;
|
||||
__entry->next_seq = next_seq;
|
||||
__entry->trans_seq = trans_seq;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rid %016llx prev_seq %llu next_seq %llu",
|
||||
SCSB_TRACE_ARGS, __entry->s_rid, __entry->prev_seq,
|
||||
__entry->next_seq)
|
||||
TP_printk(SCSBF" rid %016llx trans_seq %llu\n",
|
||||
SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trans_seq_farewell,
|
||||
TRACE_EVENT(scoutfs_trans_seq_remove,
|
||||
TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),
|
||||
|
||||
TP_ARGS(sb, rid, trans_seq),
|
||||
@@ -2083,8 +1935,8 @@ DEFINE_EVENT(scoutfs_forest_bloom_class, scoutfs_forest_bloom_search,
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_forest_prepare_commit,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_btree_ref *item_ref,
|
||||
struct scoutfs_btree_ref *bloom_ref),
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *item_ref,
|
||||
struct scoutfs_block_ref *bloom_ref),
|
||||
TP_ARGS(sb, item_ref, bloom_ref),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
@@ -2150,18 +2002,45 @@ TRACE_EVENT(scoutfs_forest_init_our_log,
|
||||
__entry->blkno, __entry->seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_block_dirty_ref,
|
||||
TP_PROTO(struct super_block *sb, u64 ref_blkno, u64 ref_seq,
|
||||
u64 block_blkno, u64 block_seq),
|
||||
|
||||
TP_ARGS(sb, ref_blkno, ref_seq, block_blkno, block_seq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ref_blkno)
|
||||
__field(__u64, ref_seq)
|
||||
__field(__u64, block_blkno)
|
||||
__field(__u64, block_seq)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ref_blkno = ref_blkno;
|
||||
__entry->ref_seq = ref_seq;
|
||||
__entry->block_blkno = block_blkno;
|
||||
__entry->block_seq = block_seq;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ref_blkno %llu ref_seq %llu block_blkno %llu block_seq %llu",
|
||||
SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq,
|
||||
__entry->block_blkno, __entry->block_seq)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_block_class,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved),
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
|
||||
unsigned long bits, __u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, bp)
|
||||
__field(__u64, blkno)
|
||||
__field(int, refcount)
|
||||
__field(int, io_count)
|
||||
__field(unsigned long, bits)
|
||||
__field(__u64, lru_moved)
|
||||
__field(long, bits)
|
||||
__field(__u64, accessed)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
@@ -2170,57 +2049,71 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
|
||||
__entry->refcount = refcount;
|
||||
__entry->io_count = io_count;
|
||||
__entry->bits = bits;
|
||||
__entry->lru_moved = lru_moved;
|
||||
__entry->accessed = accessed;
|
||||
),
|
||||
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx lru_moved %llu",
|
||||
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno,
|
||||
__entry->refcount, __entry->io_count, __entry->bits,
|
||||
__entry->lru_moved)
|
||||
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx accessed %llu",
|
||||
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno, __entry->refcount,
|
||||
__entry->io_count, __entry->bits, __entry->accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_allocate,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_free,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_insert,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_remove,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_end_io,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_submit,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_invalidate,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_mark_dirty,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_forget,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_shrink,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits, u64 lru_moved),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, lru_moved)
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_ext_next_class,
|
||||
@@ -2462,6 +2355,53 @@ TRACE_EVENT(scoutfs_alloc_move,
|
||||
__entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_item_read_page,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *pg_start, struct scoutfs_key *pg_end),
|
||||
TP_ARGS(sb, key, pg_start, pg_end),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
sk_trace_define(key)
|
||||
sk_trace_define(pg_start)
|
||||
sk_trace_define(pg_end)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
sk_trace_assign(key, key);
|
||||
sk_trace_assign(pg_start, pg_start);
|
||||
sk_trace_assign(pg_end, pg_end);
|
||||
),
|
||||
TP_printk(SCSBF" key "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT,
|
||||
SCSB_TRACE_ARGS, sk_trace_args(key), sk_trace_args(pg_start),
|
||||
sk_trace_args(pg_end))
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_item_invalidate_page,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *start,
|
||||
struct scoutfs_key *end, struct scoutfs_key *pg_start,
|
||||
struct scoutfs_key *pg_end, int pgi),
|
||||
TP_ARGS(sb, start, end, pg_start, pg_end, pgi),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
sk_trace_define(pg_start)
|
||||
sk_trace_define(pg_end)
|
||||
__field(int, pgi)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
sk_trace_assign(pg_start, pg_start);
|
||||
sk_trace_assign(pg_end, pg_end);
|
||||
__entry->pgi = pgi;
|
||||
),
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" pg_start "SK_FMT" pg_end "SK_FMT" pgi %d",
|
||||
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
|
||||
sk_trace_args(pg_start), sk_trace_args(pg_end), __entry->pgi)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_SCOUTFS_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
||||
@@ -59,7 +59,6 @@ struct server_info {
|
||||
int err;
|
||||
bool shutting_down;
|
||||
struct completion start_comp;
|
||||
struct sockaddr_in listen_sin;
|
||||
u64 term;
|
||||
struct scoutfs_net_connection *conn;
|
||||
|
||||
@@ -75,7 +74,7 @@ struct server_info {
|
||||
unsigned long nr_clients;
|
||||
|
||||
/* track clients waiting in unmmount for farewell response */
|
||||
struct mutex farewell_mutex;
|
||||
spinlock_t farewell_lock;
|
||||
struct list_head farewell_requests;
|
||||
struct work_struct farewell_work;
|
||||
|
||||
@@ -92,6 +91,7 @@ struct server_info {
|
||||
|
||||
struct mutex logs_mutex;
|
||||
struct mutex srch_mutex;
|
||||
struct mutex mounted_clients_mutex;
|
||||
|
||||
/* stable versions stored from commits, given in locks and rpcs */
|
||||
seqcount_t roots_seqcount;
|
||||
@@ -182,7 +182,7 @@ int scoutfs_server_apply_commit(struct super_block *sb, int err)
|
||||
return err;
|
||||
}
|
||||
|
||||
void scoutfs_server_get_roots(struct super_block *sb,
|
||||
static void get_roots(struct super_block *sb,
|
||||
struct scoutfs_net_roots *roots)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
@@ -556,7 +556,7 @@ static int server_get_roots(struct super_block *sb,
|
||||
memset(&roots, 0, sizeof(roots));
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
scoutfs_server_get_roots(sb, &roots);
|
||||
get_roots(sb, &roots);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
@@ -649,79 +649,10 @@ static void init_trans_seq_key(struct scoutfs_key *key, u64 seq, u64 rid)
|
||||
}
|
||||
|
||||
/*
|
||||
* Give the client the next sequence number for their transaction. They
|
||||
* provide their previous transaction sequence number that they've
|
||||
* committed.
|
||||
*
|
||||
* We track the sequence numbers of transactions that clients have open.
|
||||
* This limits the transaction sequence numbers that can be returned in
|
||||
* the index of inodes by meta and data transaction numbers. We
|
||||
* communicate the largest possible sequence number to clients via an
|
||||
* rpc.
|
||||
*
|
||||
* The transaction sequence tracking is stored in a btree so it is
|
||||
* shared across servers. Final entries are removed when processing a
|
||||
* client's farewell or when it's removed.
|
||||
* Remove all trans_seq items owned by the client rid, the caller holds
|
||||
* the seq_rwsem.
|
||||
*/
|
||||
static int server_advance_seq(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id, void *arg, u16 arg_len)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
__le64 their_seq;
|
||||
__le64 next_seq;
|
||||
u64 rid = scoutfs_net_client_rid(conn);
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
if (arg_len != sizeof(__le64)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
memcpy(&their_seq, arg, sizeof(their_seq));
|
||||
|
||||
ret = scoutfs_server_hold_commit(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
down_write(&server->seq_rwsem);
|
||||
|
||||
if (their_seq != 0) {
|
||||
init_trans_seq_key(&key, le64_to_cpu(their_seq), rid);
|
||||
ret = scoutfs_btree_delete(sb, &server->alloc, &server->wri,
|
||||
&super->trans_seqs, &key);
|
||||
if (ret < 0 && ret != -ENOENT)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
next_seq = super->next_trans_seq;
|
||||
le64_add_cpu(&super->next_trans_seq, 1);
|
||||
|
||||
trace_scoutfs_trans_seq_advance(sb, rid, le64_to_cpu(their_seq),
|
||||
le64_to_cpu(next_seq));
|
||||
|
||||
init_trans_seq_key(&key, le64_to_cpu(next_seq), rid);
|
||||
ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
|
||||
&super->trans_seqs, &key, NULL, 0);
|
||||
unlock:
|
||||
up_write(&server->seq_rwsem);
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
|
||||
out:
|
||||
return scoutfs_net_response(sb, conn, cmd, id, ret,
|
||||
&next_seq, sizeof(next_seq));
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove any transaction sequences owned by the client. They must have
|
||||
* committed any final transaction by the time they get here via sending
|
||||
* their farewell message. This can be called multiple times as the
|
||||
* client's farewell is retransmitted so it's OK to not find any
|
||||
* entries. This is called with the server commit rwsem held.
|
||||
*/
|
||||
static int remove_trans_seq(struct super_block *sb, u64 rid)
|
||||
static int remove_trans_seq_locked(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -730,8 +661,6 @@ static int remove_trans_seq(struct super_block *sb, u64 rid)
|
||||
struct scoutfs_key key;
|
||||
int ret = 0;
|
||||
|
||||
down_write(&server->seq_rwsem);
|
||||
|
||||
init_trans_seq_key(&key, 0, 0);
|
||||
|
||||
for (;;) {
|
||||
@@ -746,17 +675,102 @@ static int remove_trans_seq(struct super_block *sb, u64 rid)
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
|
||||
if (le64_to_cpu(key.skts_rid) == rid) {
|
||||
trace_scoutfs_trans_seq_farewell(sb, rid,
|
||||
trace_scoutfs_trans_seq_remove(sb, rid,
|
||||
le64_to_cpu(key.skts_trans_seq));
|
||||
ret = scoutfs_btree_delete(sb, &server->alloc,
|
||||
&server->wri,
|
||||
&super->trans_seqs, &key);
|
||||
break;
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Give the client the next sequence number for the transaction that
|
||||
* they're opening.
|
||||
*
|
||||
* We track the sequence numbers of transactions that clients have open.
|
||||
* This limits the transaction sequence numbers that can be returned in
|
||||
* the index of inodes by meta and data transaction numbers. We
|
||||
* communicate the largest possible sequence number to clients via an
|
||||
* rpc.
|
||||
*
|
||||
* The transaction sequence tracking is stored in a btree so it is
|
||||
* shared across servers. Final entries are removed when processing a
|
||||
* client's farewell or when it's removed. We can be processent a
|
||||
* resent request that was committed by a previous server before the
|
||||
* reply was lost. At this point the client has no transactions open
|
||||
* and may or may not have just finished one. To keep it simple we
|
||||
* always remove any previous seq items, if there are any, and then
|
||||
* insert a new item for the client at the next greatest seq.
|
||||
*/
|
||||
static int server_advance_seq(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id, void *arg, u16 arg_len)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
u64 rid = scoutfs_net_client_rid(conn);
|
||||
struct scoutfs_key key;
|
||||
__le64 leseq = 0;
|
||||
u64 seq;
|
||||
int ret;
|
||||
|
||||
if (arg_len != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_server_hold_commit(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
down_write(&server->seq_rwsem);
|
||||
|
||||
ret = remove_trans_seq_locked(sb, rid);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
|
||||
seq = le64_to_cpu(super->next_trans_seq);
|
||||
le64_add_cpu(&super->next_trans_seq, 1);
|
||||
|
||||
trace_scoutfs_trans_seq_advance(sb, rid, seq);
|
||||
|
||||
init_trans_seq_key(&key, seq, rid);
|
||||
ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
|
||||
&super->trans_seqs, &key, NULL, 0);
|
||||
if (ret == 0)
|
||||
leseq = cpu_to_le64(seq);
|
||||
unlock:
|
||||
up_write(&server->seq_rwsem);
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
|
||||
out:
|
||||
return scoutfs_net_response(sb, conn, cmd, id, ret,
|
||||
&leseq, sizeof(leseq));
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove any transaction sequences owned by the client who's sent a
|
||||
* farewell They must have committed any final transaction by the time
|
||||
* they get here via sending their farewell message. This can be called
|
||||
* multiple times as the client's farewell is retransmitted so it's OK
|
||||
* to not find any entries. This is called with the server commit rwsem
|
||||
* held.
|
||||
*/
|
||||
static int remove_trans_seq(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
int ret = 0;
|
||||
|
||||
down_write(&server->seq_rwsem);
|
||||
ret = remove_trans_seq_locked(sb, rid);
|
||||
up_write(&server->seq_rwsem);
|
||||
|
||||
return ret;
|
||||
@@ -848,13 +862,13 @@ int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
|
||||
}
|
||||
|
||||
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_net_lock_grant_response *gr)
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
struct server_info *server = SCOUTFS_SB(sb)->server_info;
|
||||
|
||||
return scoutfs_net_response_node(sb, server->conn, rid,
|
||||
SCOUTFS_NET_CMD_LOCK, id, 0,
|
||||
gr, sizeof(*gr));
|
||||
nl, sizeof(*nl));
|
||||
}
|
||||
|
||||
static bool invalid_recover(struct scoutfs_net_lock_recover *nlr,
|
||||
@@ -1010,6 +1024,12 @@ static void init_mounted_client_key(struct scoutfs_key *key, u64 rid)
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a new mounted client item for a client that is sending us a
|
||||
* greeting that hasn't yet seen a response. The greeting can be
|
||||
* retransmitted to a new server after the previous inserted the item so
|
||||
* it's acceptable to see -EEXIST.
|
||||
*/
|
||||
static int insert_mounted_client(struct super_block *sb, u64 rid,
|
||||
u64 gr_flags)
|
||||
{
|
||||
@@ -1017,15 +1037,22 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_mounted_client_btree_val mcv;
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
init_mounted_client_key(&key, rid);
|
||||
mcv.flags = 0;
|
||||
if (gr_flags & SCOUTFS_NET_GREETING_FLAG_VOTER)
|
||||
mcv.flags |= SCOUTFS_MOUNTED_CLIENT_VOTER;
|
||||
if (gr_flags & SCOUTFS_NET_GREETING_FLAG_QUORUM)
|
||||
mcv.flags |= SCOUTFS_MOUNTED_CLIENT_QUORUM;
|
||||
|
||||
return scoutfs_btree_insert(sb, &server->alloc, &server->wri,
|
||||
&super->mounted_clients, &key, &mcv,
|
||||
sizeof(mcv));
|
||||
mutex_lock(&server->mounted_clients_mutex);
|
||||
ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
|
||||
&super->mounted_clients, &key, &mcv,
|
||||
sizeof(mcv));
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
mutex_unlock(&server->mounted_clients_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1033,9 +1060,6 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
|
||||
* removed if we're processing a farewell on behalf of a client that
|
||||
* already had a previous server process its farewell.
|
||||
*
|
||||
* When we remove the last mounted client that's voting we write a new
|
||||
* quorum block with the updated unmount_barrier.
|
||||
*
|
||||
* The caller has to serialize with farewell processing.
|
||||
*/
|
||||
static int delete_mounted_client(struct super_block *sb, u64 rid)
|
||||
@@ -1047,8 +1071,10 @@ static int delete_mounted_client(struct super_block *sb, u64 rid)
|
||||
|
||||
init_mounted_client_key(&key, rid);
|
||||
|
||||
mutex_lock(&server->mounted_clients_mutex);
|
||||
ret = scoutfs_btree_delete(sb, &server->alloc, &server->wri,
|
||||
&super->mounted_clients, &key);
|
||||
mutex_unlock(&server->mounted_clients_mutex);
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
|
||||
@@ -1096,6 +1122,20 @@ static int cancel_srch_compact(struct super_block *sb, u64 rid)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Farewell processing is async to the request processing work. Shutdown
|
||||
* waits for request processing to finish and then tears down the connection.
|
||||
* We don't want to queue farewell processing once we start shutting down
|
||||
* so that we don't have farewell processing racing with the connecting
|
||||
* being shutdown. If a mount's farewell message is dropped by a server
|
||||
* it will be processed by the next server.
|
||||
*/
|
||||
static void queue_farewell_work(struct server_info *server)
|
||||
{
|
||||
if (!server->shutting_down)
|
||||
queue_work(server->wq, &server->farewell_work);
|
||||
}
|
||||
|
||||
/*
|
||||
* Process an incoming greeting request in the server from the client.
|
||||
* We try to send responses to failed greetings so that the sender can
|
||||
@@ -1121,7 +1161,6 @@ static int server_greeting(struct super_block *sb,
|
||||
struct scoutfs_net_greeting *gr = arg;
|
||||
struct scoutfs_net_greeting greet;
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
__le64 umb = 0;
|
||||
bool reconnecting;
|
||||
bool first_contact;
|
||||
bool farewell;
|
||||
@@ -1141,10 +1180,10 @@ static int server_greeting(struct super_block *sb,
|
||||
goto send_err;
|
||||
}
|
||||
|
||||
if (gr->format_hash != super->format_hash) {
|
||||
if (gr->version != super->version) {
|
||||
scoutfs_warn(sb, "client sent format 0x%llx, server has 0x%llx",
|
||||
le64_to_cpu(gr->format_hash),
|
||||
le64_to_cpu(super->format_hash));
|
||||
le64_to_cpu(gr->version),
|
||||
le64_to_cpu(super->version));
|
||||
ret = -EINVAL;
|
||||
goto send_err;
|
||||
}
|
||||
@@ -1154,28 +1193,19 @@ static int server_greeting(struct super_block *sb,
|
||||
if (ret < 0)
|
||||
goto send_err;
|
||||
|
||||
spin_lock(&server->lock);
|
||||
umb = super->unmount_barrier;
|
||||
spin_unlock(&server->lock);
|
||||
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
ret = insert_mounted_client(sb, le64_to_cpu(gr->rid),
|
||||
le64_to_cpu(gr->flags));
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
queue_work(server->wq, &server->farewell_work);
|
||||
} else {
|
||||
umb = gr->unmount_barrier;
|
||||
}
|
||||
|
||||
send_err:
|
||||
err = ret;
|
||||
|
||||
greet.fsid = super->hdr.fsid;
|
||||
greet.format_hash = super->format_hash;
|
||||
greet.version = super->version;
|
||||
greet.server_term = cpu_to_le64(server->term);
|
||||
greet.unmount_barrier = umb;
|
||||
greet.rid = gr->rid;
|
||||
greet.flags = 0;
|
||||
|
||||
@@ -1231,19 +1261,17 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
|
||||
|
||||
/*
|
||||
* This work processes farewell requests asynchronously. Requests from
|
||||
* voting clients can be held until only the final quorum remains and
|
||||
* quorum members can be held until only the final majority remains and
|
||||
* they've all sent farewell requests.
|
||||
*
|
||||
* When we remove the last mounted client record for the last voting
|
||||
* client then we increase the unmount_barrier and write it to the super
|
||||
* block. If voting clients don't get their farewell response they'll
|
||||
* see the greater umount_barrier in the super and will know that their
|
||||
* farewell has been processed and that they can exit.
|
||||
* A client can be disconnected before receiving our farewell response.
|
||||
* Before reconnecting they check for their mounted client item, if it's
|
||||
* been removed then they know that their farewell has been processed
|
||||
* and that they finish unmounting without reconnecting.
|
||||
*
|
||||
* Responses that are waiting for clients who aren't voting are
|
||||
* immediately sent. Clients that don't have a mounted client record
|
||||
* have already had their farewell processed by another server and can
|
||||
* proceed.
|
||||
* Responses for clients who aren't quorum members are immediately sent.
|
||||
* Clients that don't have a mounted client record have already had
|
||||
* their farewell processed by another server and can proceed.
|
||||
*
|
||||
* Farewell responses are unique in that sending them causes the server
|
||||
* to shutdown the connection to the client next time the socket
|
||||
@@ -1265,56 +1293,26 @@ static void farewell_worker(struct work_struct *work)
|
||||
struct farewell_request *tmp;
|
||||
struct farewell_request *fw;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
unsigned int nr_unmounting = 0;
|
||||
unsigned int nr_mounted = 0;
|
||||
unsigned int quo_reqs = 0;
|
||||
unsigned int quo_mnts = 0;
|
||||
unsigned int non_mnts = 0;
|
||||
struct scoutfs_key key;
|
||||
LIST_HEAD(reqs);
|
||||
LIST_HEAD(send);
|
||||
bool deleted = false;
|
||||
bool voting;
|
||||
bool more_reqs;
|
||||
int ret;
|
||||
|
||||
/* grab all the requests that are waiting */
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
spin_lock(&server->farewell_lock);
|
||||
list_splice_init(&server->farewell_requests, &reqs);
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
spin_unlock(&server->farewell_lock);
|
||||
|
||||
/* count how many reqs requests are from voting clients */
|
||||
nr_unmounting = 0;
|
||||
list_for_each_entry_safe(fw, tmp, &reqs, entry) {
|
||||
init_mounted_client_key(&key, fw->rid);
|
||||
ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
|
||||
&iref);
|
||||
if (ret == 0 && invalid_mounted_client_item(&iref)) {
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
ret = -EIO;
|
||||
}
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT) {
|
||||
list_move_tail(&fw->entry, &send);
|
||||
continue;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
mcv = iref.val;
|
||||
voting = (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER) != 0;
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
|
||||
if (!voting) {
|
||||
list_move_tail(&fw->entry, &send);
|
||||
continue;
|
||||
}
|
||||
|
||||
nr_unmounting++;
|
||||
}
|
||||
|
||||
/* see how many mounted clients could vote for quorum */
|
||||
/* first count mounted clients who could send requests */
|
||||
init_mounted_client_key(&key, 0);
|
||||
for (;;) {
|
||||
mutex_lock(&server->mounted_clients_mutex);
|
||||
ret = scoutfs_btree_next(sb, &super->mounted_clients, &key,
|
||||
&iref);
|
||||
mutex_unlock(&server->mounted_clients_mutex);
|
||||
if (ret == 0 && invalid_mounted_client_item(&iref)) {
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
ret = -EIO;
|
||||
@@ -1328,23 +1326,62 @@ static void farewell_worker(struct work_struct *work)
|
||||
key = *iref.key;
|
||||
mcv = iref.val;
|
||||
|
||||
if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER)
|
||||
nr_mounted++;
|
||||
if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM)
|
||||
quo_mnts++;
|
||||
else
|
||||
non_mnts++;
|
||||
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
/* send as many responses as we can to maintain quorum */
|
||||
while ((fw = list_first_entry_or_null(&reqs, struct farewell_request,
|
||||
entry)) &&
|
||||
(nr_mounted > super->quorum_count ||
|
||||
nr_unmounting >= nr_mounted)) {
|
||||
/* walk requests, checking their mounted client items */
|
||||
list_for_each_entry_safe(fw, tmp, &reqs, entry) {
|
||||
init_mounted_client_key(&key, fw->rid);
|
||||
mutex_lock(&server->mounted_clients_mutex);
|
||||
ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
|
||||
&iref);
|
||||
mutex_unlock(&server->mounted_clients_mutex);
|
||||
if (ret == 0 && invalid_mounted_client_item(&iref)) {
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
ret = -EIO;
|
||||
}
|
||||
if (ret < 0) {
|
||||
/* missing items means we've already processed */
|
||||
if (ret == -ENOENT) {
|
||||
list_move(&fw->entry, &send);
|
||||
continue;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_move_tail(&fw->entry, &send);
|
||||
nr_mounted--;
|
||||
nr_unmounting--;
|
||||
deleted = true;
|
||||
mcv = iref.val;
|
||||
|
||||
/* count quo reqs, can always send to non-quo clients */
|
||||
if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM) {
|
||||
quo_reqs++;
|
||||
} else {
|
||||
list_move(&fw->entry, &send);
|
||||
non_mnts--;
|
||||
}
|
||||
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
|
||||
/*
|
||||
* Only requests from quorum members remain and we've counted
|
||||
* them and remaining mounts. Send responses as long as enough
|
||||
* quorum clients remain for a majority, or all the requests are
|
||||
* from the final majority of quorum clients they're the only
|
||||
* mounted clients.
|
||||
*/
|
||||
list_for_each_entry_safe(fw, tmp, &reqs, entry) {
|
||||
if ((quo_mnts > scoutfs_quorum_votes_needed(sb)) ||
|
||||
((quo_reqs == quo_mnts) && (non_mnts == 0))) {
|
||||
list_move_tail(&fw->entry, &send);
|
||||
quo_mnts--;
|
||||
quo_reqs--;
|
||||
}
|
||||
}
|
||||
|
||||
/* process and send farewell responses */
|
||||
@@ -1353,24 +1390,12 @@ static void farewell_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* delete mounted client last, client reconnect looks for it */
|
||||
ret = scoutfs_lock_server_farewell(sb, fw->rid) ?:
|
||||
remove_trans_seq(sb, fw->rid) ?:
|
||||
reclaim_log_trees(sb, fw->rid) ?:
|
||||
delete_mounted_client(sb, fw->rid) ?:
|
||||
cancel_srch_compact(sb, fw->rid);
|
||||
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* update the unmount barrier if we deleted all voting clients */
|
||||
if (deleted && nr_mounted == 0) {
|
||||
ret = scoutfs_server_hold_commit(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
le64_add_cpu(&super->unmount_barrier, 1);
|
||||
cancel_srch_compact(sb, fw->rid) ?:
|
||||
delete_mounted_client(sb, fw->rid);
|
||||
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
if (ret)
|
||||
@@ -1392,16 +1417,16 @@ static void farewell_worker(struct work_struct *work)
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
spin_lock(&server->farewell_lock);
|
||||
more_reqs = !list_empty(&server->farewell_requests);
|
||||
list_splice_init(&reqs, &server->farewell_requests);
|
||||
list_splice_init(&send, &server->farewell_requests);
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
spin_unlock(&server->farewell_lock);
|
||||
|
||||
if (ret < 0)
|
||||
stop_server(server);
|
||||
else if (more_reqs && !server->shutting_down)
|
||||
queue_work(server->wq, &server->farewell_work);
|
||||
else if (more_reqs)
|
||||
queue_farewell_work(server);
|
||||
}
|
||||
|
||||
static void free_farewell_requests(struct super_block *sb, u64 rid)
|
||||
@@ -1409,15 +1434,17 @@ static void free_farewell_requests(struct super_block *sb, u64 rid)
|
||||
struct server_info *server = SCOUTFS_SB(sb)->server_info;
|
||||
struct farewell_request *tmp;
|
||||
struct farewell_request *fw;
|
||||
LIST_HEAD(rid_list);
|
||||
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
spin_lock(&server->farewell_lock);
|
||||
list_for_each_entry_safe(fw, tmp, &server->farewell_requests, entry) {
|
||||
if (rid == 0 || fw->rid == rid) {
|
||||
list_del_init(&fw->entry);
|
||||
kfree(fw);
|
||||
}
|
||||
if (rid == 0 || fw->rid == rid)
|
||||
list_move_tail(&fw->entry, &rid_list);
|
||||
}
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
spin_unlock(&server->farewell_lock);
|
||||
|
||||
list_for_each_entry_safe(fw, tmp, &rid_list, entry)
|
||||
kfree(fw);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1451,11 +1478,11 @@ static int server_farewell(struct super_block *sb,
|
||||
fw->rid = rid;
|
||||
fw->net_id = id;
|
||||
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
spin_lock(&server->farewell_lock);
|
||||
list_add_tail(&fw->entry, &server->farewell_requests);
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
spin_unlock(&server->farewell_lock);
|
||||
|
||||
queue_work(server->wq, &server->farewell_work);
|
||||
queue_farewell_work(server);
|
||||
|
||||
/* response will be sent later */
|
||||
return 0;
|
||||
@@ -1520,18 +1547,16 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
struct super_block *sb = server->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
struct scoutfs_net_connection *conn = NULL;
|
||||
DECLARE_WAIT_QUEUE_HEAD(waitq);
|
||||
struct sockaddr_in sin;
|
||||
LIST_HEAD(conn_list);
|
||||
u64 max_vers;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
trace_scoutfs_server_work_enter(sb, 0, 0);
|
||||
|
||||
sin = server->listen_sin;
|
||||
|
||||
scoutfs_quorum_slot_sin(super, opts->quorum_slot_nr, &sin);
|
||||
scoutfs_info(sb, "server setting up at "SIN_FMT, SIN_ARG(&sin));
|
||||
|
||||
conn = scoutfs_net_alloc_conn(sb, server_notify_up, server_notify_down,
|
||||
@@ -1551,9 +1576,6 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* start up the server subsystems before accepting */
|
||||
ret = scoutfs_read_super(sb, super);
|
||||
if (ret < 0)
|
||||
@@ -1593,19 +1615,6 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
goto shutdown;
|
||||
|
||||
/*
|
||||
* Write our address in the super before it's possible for net
|
||||
* processing to start writing the super as part of
|
||||
* transactions. In theory clients could be trying to connect
|
||||
* to our address without having seen it in the super (maybe
|
||||
* they saw it a long time ago).
|
||||
*/
|
||||
scoutfs_addr_from_sin(&super->server_addr, &sin);
|
||||
super->quorum_server_term = cpu_to_le64(server->term);
|
||||
ret = scoutfs_write_super(sb, super);
|
||||
if (ret < 0)
|
||||
goto shutdown;
|
||||
|
||||
/* start accepting connections and processing work */
|
||||
server->conn = conn;
|
||||
scoutfs_net_listen(sb, conn);
|
||||
@@ -1618,39 +1627,28 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
|
||||
shutdown:
|
||||
scoutfs_info(sb, "server shutting down at "SIN_FMT, SIN_ARG(&sin));
|
||||
/* wait for request processing */
|
||||
|
||||
/* wait for farewell to finish sending messages */
|
||||
flush_work(&server->farewell_work);
|
||||
|
||||
/* wait for requests to finish, no more requests */
|
||||
scoutfs_net_shutdown(sb, conn);
|
||||
/* wait for commit queued by request processing */
|
||||
flush_work(&server->commit_work);
|
||||
server->conn = NULL;
|
||||
|
||||
/* wait for extra queues by requests, won't find waiters */
|
||||
flush_work(&server->commit_work);
|
||||
|
||||
scoutfs_lock_server_destroy(sb);
|
||||
|
||||
out:
|
||||
scoutfs_quorum_clear_leader(sb);
|
||||
scoutfs_net_free_conn(sb, conn);
|
||||
|
||||
/* let quorum know that we've shutdown */
|
||||
scoutfs_quorum_server_shutdown(sb);
|
||||
|
||||
scoutfs_info(sb, "server stopped at "SIN_FMT, SIN_ARG(&sin));
|
||||
trace_scoutfs_server_work_exit(sb, 0, ret);
|
||||
|
||||
/*
|
||||
* Always try to clear our presence in the super so that we're
|
||||
* not fenced. We do this last because other mounts will try to
|
||||
* reach quorum the moment they see zero here. The later we do
|
||||
* this the longer we have to finish shutdown while clients
|
||||
* timeout.
|
||||
*/
|
||||
err = scoutfs_read_super(sb, super);
|
||||
if (err == 0) {
|
||||
super->quorum_fenced_term = cpu_to_le64(server->term);
|
||||
memset(&super->server_addr, 0, sizeof(super->server_addr));
|
||||
err = scoutfs_write_super(sb, super);
|
||||
}
|
||||
if (err < 0) {
|
||||
scoutfs_err(sb, "failed to clear election term %llu at "SIN_FMT", this mount could be fenced",
|
||||
server->term, SIN_ARG(&sin));
|
||||
}
|
||||
|
||||
server->err = ret;
|
||||
complete(&server->start_comp);
|
||||
}
|
||||
@@ -1660,14 +1658,12 @@ out:
|
||||
* the super block's fence_term has been set to the new server's term so
|
||||
* that it won't be fenced.
|
||||
*/
|
||||
int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
|
||||
u64 term)
|
||||
int scoutfs_server_start(struct super_block *sb, u64 term)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
|
||||
server->err = 0;
|
||||
server->shutting_down = false;
|
||||
server->listen_sin = *sin;
|
||||
server->term = term;
|
||||
init_completion(&server->start_comp);
|
||||
|
||||
@@ -1696,8 +1692,9 @@ void scoutfs_server_stop(struct super_block *sb)
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
|
||||
stop_server(server);
|
||||
/* XXX not sure both are needed */
|
||||
|
||||
cancel_work_sync(&server->work);
|
||||
cancel_work_sync(&server->farewell_work);
|
||||
cancel_work_sync(&server->commit_work);
|
||||
}
|
||||
|
||||
@@ -1719,12 +1716,13 @@ int scoutfs_server_setup(struct super_block *sb)
|
||||
INIT_WORK(&server->commit_work, scoutfs_server_commit_func);
|
||||
init_rwsem(&server->seq_rwsem);
|
||||
INIT_LIST_HEAD(&server->clients);
|
||||
mutex_init(&server->farewell_mutex);
|
||||
spin_lock_init(&server->farewell_lock);
|
||||
INIT_LIST_HEAD(&server->farewell_requests);
|
||||
INIT_WORK(&server->farewell_work, farewell_worker);
|
||||
mutex_init(&server->alloc_mutex);
|
||||
mutex_init(&server->logs_mutex);
|
||||
mutex_init(&server->srch_mutex);
|
||||
mutex_init(&server->mounted_clients_mutex);
|
||||
seqcount_init(&server->roots_seqcount);
|
||||
|
||||
server->wq = alloc_workqueue("scoutfs_server",
|
||||
@@ -1752,11 +1750,12 @@ void scoutfs_server_destroy(struct super_block *sb)
|
||||
|
||||
/* wait for server work to wait for everything to shut down */
|
||||
cancel_work_sync(&server->work);
|
||||
/* farewell work triggers commits */
|
||||
cancel_work_sync(&server->farewell_work);
|
||||
/* recv work/compaction could have left commit_work queued */
|
||||
cancel_work_sync(&server->commit_work);
|
||||
|
||||
/* pending farewell requests are another server's problem */
|
||||
cancel_work_sync(&server->farewell_work);
|
||||
free_farewell_requests(sb, 0);
|
||||
|
||||
trace_scoutfs_server_workqueue_destroy(sb, 0, 0);
|
||||
|
||||
@@ -59,18 +59,15 @@ do { \
|
||||
int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_net_lock_grant_response *gr);
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_key *key);
|
||||
void scoutfs_server_get_roots(struct super_block *sb,
|
||||
struct scoutfs_net_roots *roots);
|
||||
int scoutfs_server_hold_commit(struct super_block *sb);
|
||||
int scoutfs_server_apply_commit(struct super_block *sb, int err);
|
||||
|
||||
struct sockaddr_in;
|
||||
struct scoutfs_quorum_elected_info;
|
||||
int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
|
||||
u64 term);
|
||||
int scoutfs_server_start(struct super_block *sb, u64 term);
|
||||
void scoutfs_server_abort(struct super_block *sb);
|
||||
void scoutfs_server_stop(struct super_block *sb);
|
||||
|
||||
|
||||
184
kmod/src/srch.c
184
kmod/src/srch.c
@@ -255,24 +255,9 @@ static u8 height_for_blk(u64 blk)
|
||||
return hei;
|
||||
}
|
||||
|
||||
static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
|
||||
int level)
|
||||
static inline u32 srch_level_magic(int level)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_block_header *hdr;
|
||||
|
||||
/* don't leak uninit kernel mem.. block should do this for us? */
|
||||
memset(bl->data, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
|
||||
hdr = bl->data;
|
||||
hdr->fsid = super->hdr.fsid;
|
||||
hdr->blkno = cpu_to_le64(bl->blkno);
|
||||
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
|
||||
|
||||
if (level)
|
||||
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_PARENT);
|
||||
else
|
||||
hdr->magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK);
|
||||
return level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT : SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -284,39 +269,15 @@ static void init_file_block(struct super_block *sb, struct scoutfs_block *bl,
|
||||
*/
|
||||
static int read_srch_block(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri, int level,
|
||||
struct scoutfs_srch_ref *ref,
|
||||
struct scoutfs_block_ref *ref,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_block *bl;
|
||||
int retries = 0;
|
||||
int ret = 0;
|
||||
int mag;
|
||||
u32 magic = srch_level_magic(level);
|
||||
int ret;
|
||||
|
||||
mag = level ? SCOUTFS_BLOCK_MAGIC_SRCH_PARENT :
|
||||
SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK;
|
||||
retry:
|
||||
bl = scoutfs_block_read(sb, le64_to_cpu(ref->blkno));
|
||||
if (!IS_ERR_OR_NULL(bl) &&
|
||||
!scoutfs_block_consistent_ref(sb, bl, ref->seq, ref->blkno, mag)) {
|
||||
|
||||
scoutfs_inc_counter(sb, srch_inconsistent_ref);
|
||||
scoutfs_block_writer_forget(sb, wri, bl);
|
||||
scoutfs_block_invalidate(sb, bl);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
|
||||
if (retries++ == 0)
|
||||
goto retry;
|
||||
|
||||
bl = ERR_PTR(-ESTALE);
|
||||
ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
|
||||
if (ret == -ESTALE)
|
||||
scoutfs_inc_counter(sb, srch_read_stale);
|
||||
}
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
bl = NULL;
|
||||
}
|
||||
|
||||
*bl_ret = bl;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -333,7 +294,7 @@ static int read_path_block(struct super_block *sb,
|
||||
{
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct scoutfs_srch_parent *srp;
|
||||
struct scoutfs_srch_ref ref;
|
||||
struct scoutfs_block_ref ref;
|
||||
int level;
|
||||
int ind;
|
||||
int ret;
|
||||
@@ -392,12 +353,10 @@ static int get_file_block(struct super_block *sb,
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct scoutfs_srch_parent *srp;
|
||||
struct scoutfs_block *new_bl;
|
||||
struct scoutfs_srch_ref *ref;
|
||||
u64 blkno = 0;
|
||||
struct scoutfs_block_ref new_root_ref;
|
||||
struct scoutfs_block_ref *ref;
|
||||
int level;
|
||||
int ind;
|
||||
int err;
|
||||
int ret;
|
||||
u8 hei;
|
||||
|
||||
@@ -409,29 +368,21 @@ static int get_file_block(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
|
||||
memset(&new_root_ref, 0, sizeof(new_root_ref));
|
||||
level = sfl->height;
|
||||
|
||||
ret = scoutfs_block_dirty_ref(sb, alloc, wri, &new_root_ref,
|
||||
srch_level_magic(level), &bl, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
bl = scoutfs_block_create(sb, blkno);
|
||||
if (IS_ERR(bl)) {
|
||||
ret = PTR_ERR(bl);
|
||||
goto out;
|
||||
}
|
||||
blkno = 0;
|
||||
|
||||
scoutfs_block_writer_mark_dirty(sb, wri, bl);
|
||||
|
||||
init_file_block(sb, bl, sfl->height);
|
||||
if (sfl->height) {
|
||||
if (level) {
|
||||
srp = bl->data;
|
||||
srp->refs[0].blkno = sfl->ref.blkno;
|
||||
srp->refs[0].seq = sfl->ref.seq;
|
||||
srp->refs[0] = sfl->ref;
|
||||
}
|
||||
|
||||
hdr = bl->data;
|
||||
sfl->ref.blkno = hdr->blkno;
|
||||
sfl->ref.seq = hdr->seq;
|
||||
sfl->ref = new_root_ref;
|
||||
sfl->height++;
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
@@ -447,54 +398,13 @@ static int get_file_block(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* read an existing block */
|
||||
if (ref->blkno) {
|
||||
ret = read_srch_block(sb, wri, level, ref, &bl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* allocate a new block if we need it */
|
||||
if (!ref->blkno || ((flags & GFB_DIRTY) &&
|
||||
!scoutfs_block_writer_is_dirty(sb, bl))) {
|
||||
ret = scoutfs_alloc_meta(sb, alloc, wri, &blkno);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
new_bl = scoutfs_block_create(sb, blkno);
|
||||
if (IS_ERR(new_bl)) {
|
||||
ret = PTR_ERR(new_bl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (bl) {
|
||||
/* cow old block if we have one */
|
||||
ret = scoutfs_free_meta(sb, alloc, wri,
|
||||
bl->blkno);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
memcpy(new_bl->data, bl->data,
|
||||
SCOUTFS_BLOCK_LG_SIZE);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = new_bl;
|
||||
hdr = bl->data;
|
||||
hdr->blkno = cpu_to_le64(bl->blkno);
|
||||
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
|
||||
} else {
|
||||
/* init new allocated block */
|
||||
bl = new_bl;
|
||||
init_file_block(sb, bl, level);
|
||||
}
|
||||
|
||||
blkno = 0;
|
||||
scoutfs_block_writer_mark_dirty(sb, wri, bl);
|
||||
|
||||
/* update file or parent block ref */
|
||||
hdr = bl->data;
|
||||
ref->blkno = hdr->blkno;
|
||||
ref->seq = hdr->seq;
|
||||
}
|
||||
if (flags & GFB_DIRTY)
|
||||
ret = scoutfs_block_dirty_ref(sb, alloc, wri, ref, srch_level_magic(level),
|
||||
&bl, 0, NULL);
|
||||
else
|
||||
ret = scoutfs_block_read_ref(sb, ref, srch_level_magic(level), &bl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (level == 0) {
|
||||
ret = 0;
|
||||
@@ -514,12 +424,6 @@ static int get_file_block(struct super_block *sb,
|
||||
out:
|
||||
scoutfs_block_put(sb, parent);
|
||||
|
||||
/* return allocated blkno on error */
|
||||
if (blkno > 0) {
|
||||
err = scoutfs_free_meta(sb, alloc, wri, blkno);
|
||||
BUG_ON(err); /* radix should have been dirty */
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
@@ -1198,14 +1102,10 @@ int scoutfs_srch_get_compact(struct super_block *sb,
|
||||
|
||||
for (;;scoutfs_key_inc(&key)) {
|
||||
ret = scoutfs_btree_next(sb, root, &key, &iref);
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
sc->nr = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret == 0) {
|
||||
if (iref.val_len == sizeof(struct scoutfs_srch_file)) {
|
||||
if (iref.key->sk_type != type) {
|
||||
ret = -ENOENT;
|
||||
} else if (iref.val_len == sizeof(sfl)) {
|
||||
key = *iref.key;
|
||||
memcpy(&sfl, iref.val, iref.val_len);
|
||||
} else {
|
||||
@@ -1213,24 +1113,25 @@ int scoutfs_srch_get_compact(struct super_block *sb,
|
||||
}
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
/* see if we ran out of log files or files entirely */
|
||||
if (ret == -ENOENT) {
|
||||
sc->nr = 0;
|
||||
if (type == SCOUTFS_SRCH_LOG_TYPE) {
|
||||
type = SCOUTFS_SRCH_BLOCKS_TYPE;
|
||||
init_srch_key(&key, type, 0, 0);
|
||||
continue;
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* skip any files already being compacted */
|
||||
if (scoutfs_spbm_test(&busy, le64_to_cpu(sfl.ref.blkno)))
|
||||
continue;
|
||||
|
||||
/* see if we ran out of log files or files entirely */
|
||||
if (key.sk_type != type) {
|
||||
sc->nr = 0;
|
||||
if (key.sk_type == SCOUTFS_SRCH_BLOCKS_TYPE) {
|
||||
type = SCOUTFS_SRCH_BLOCKS_TYPE;
|
||||
} else {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* reset if we iterated into the next size category */
|
||||
if (type == SCOUTFS_SRCH_BLOCKS_TYPE) {
|
||||
order = fls64(le64_to_cpu(sfl.blocks)) /
|
||||
@@ -2255,7 +2156,8 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
if (ret < 0)
|
||||
goto commit;
|
||||
|
||||
ret = scoutfs_block_writer_write(sb, &wri);
|
||||
ret = scoutfs_alloc_prepare_commit(sb, &alloc, &wri) ?:
|
||||
scoutfs_block_writer_write(sb, &wri);
|
||||
commit:
|
||||
/* the server won't use our partial compact if _ERROR is set */
|
||||
sc->meta_avail = alloc.avail;
|
||||
|
||||
113
kmod/src/super.c
113
kmod/src/super.c
@@ -176,7 +176,8 @@ static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
|
||||
struct super_block *sb = root->d_sb;
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
seq_printf(seq, ",server_addr="SIN_FMT, SIN_ARG(&opts->server_addr));
|
||||
if (opts->quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
|
||||
seq_printf(seq, ",metadev_path=%s", opts->metadev_path);
|
||||
|
||||
return 0;
|
||||
@@ -192,20 +193,19 @@ static ssize_t metadev_path_show(struct kobject *kobj,
|
||||
}
|
||||
SCOUTFS_ATTR_RO(metadev_path);
|
||||
|
||||
static ssize_t server_addr_show(struct kobject *kobj,
|
||||
static ssize_t quorum_server_nr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, SIN_FMT"\n",
|
||||
SIN_ARG(&opts->server_addr));
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(server_addr);
|
||||
SCOUTFS_ATTR_RO(quorum_server_nr);
|
||||
|
||||
static struct attribute *mount_options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(server_addr),
|
||||
SCOUTFS_ATTR_PTR(quorum_server_nr),
|
||||
NULL,
|
||||
};
|
||||
|
||||
@@ -257,15 +257,12 @@ static void scoutfs_put_super(struct super_block *sb)
|
||||
scoutfs_item_destroy(sb);
|
||||
scoutfs_forest_destroy(sb);
|
||||
|
||||
/* the server locks the listen address and compacts */
|
||||
scoutfs_quorum_destroy(sb);
|
||||
scoutfs_lock_shutdown(sb);
|
||||
scoutfs_server_destroy(sb);
|
||||
scoutfs_net_destroy(sb);
|
||||
scoutfs_lock_destroy(sb);
|
||||
|
||||
/* server clears quorum leader flag during shutdown */
|
||||
scoutfs_quorum_destroy(sb);
|
||||
|
||||
scoutfs_block_destroy(sb);
|
||||
scoutfs_destroy_triggers(sb);
|
||||
scoutfs_options_destroy(sb);
|
||||
@@ -309,6 +306,34 @@ int scoutfs_write_super(struct super_block *sb,
|
||||
sizeof(struct scoutfs_super_block));
|
||||
}
|
||||
|
||||
static bool invalid_blkno_limits(struct super_block *sb, char *which,
|
||||
u64 start, __le64 first, __le64 last,
|
||||
struct block_device *bdev, int shift)
|
||||
{
|
||||
u64 blkno;
|
||||
|
||||
if (le64_to_cpu(first) < start) {
|
||||
scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
|
||||
which, le64_to_cpu(first), start);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (le64_to_cpu(first) > le64_to_cpu(last)) {
|
||||
scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
|
||||
which, le64_to_cpu(first), which, le64_to_cpu(last));
|
||||
return true;
|
||||
}
|
||||
|
||||
blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
|
||||
if (le64_to_cpu(last) > blkno) {
|
||||
scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
|
||||
which, le64_to_cpu(last), blkno);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read super, specifying bdev.
|
||||
*/
|
||||
@@ -316,9 +341,9 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
|
||||
struct block_device *bdev,
|
||||
struct scoutfs_super_block *super_res)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super;
|
||||
__le32 calc;
|
||||
u64 blkno;
|
||||
int ret;
|
||||
|
||||
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
|
||||
@@ -352,58 +377,27 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
|
||||
}
|
||||
|
||||
|
||||
if (super->format_hash != cpu_to_le64(SCOUTFS_FORMAT_HASH)) {
|
||||
scoutfs_err(sb, "super block has invalid format hash 0x%llx, expected 0x%llx",
|
||||
le64_to_cpu(super->format_hash),
|
||||
SCOUTFS_FORMAT_HASH);
|
||||
if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
|
||||
scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
|
||||
le64_to_cpu(super->version),
|
||||
SCOUTFS_INTEROP_VERSION);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX do we want more rigorous invalid super checking? */
|
||||
|
||||
if (super->quorum_count == 0 ||
|
||||
super->quorum_count > SCOUTFS_QUORUM_MAX_COUNT) {
|
||||
scoutfs_err(sb, "super block has invalid quorum count %u, must be > 0 and <= %u",
|
||||
super->quorum_count, SCOUTFS_QUORUM_MAX_COUNT);
|
||||
if (invalid_blkno_limits(sb, "meta",
|
||||
SCOUTFS_META_DEV_START_BLKNO,
|
||||
super->first_meta_blkno,
|
||||
super->last_meta_blkno, sbi->meta_bdev,
|
||||
SCOUTFS_BLOCK_LG_SHIFT) ||
|
||||
invalid_blkno_limits(sb, "data",
|
||||
SCOUTFS_DATA_DEV_START_BLKNO,
|
||||
super->first_data_blkno,
|
||||
super->last_data_blkno, sb->s_bdev,
|
||||
SCOUTFS_BLOCK_SM_SHIFT)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
blkno = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
|
||||
SCOUTFS_BLOCK_SM_LG_SHIFT;
|
||||
if (le64_to_cpu(super->first_meta_blkno) < blkno) {
|
||||
scoutfs_err(sb, "super block first meta blkno %llu is within quorum blocks",
|
||||
le64_to_cpu(super->first_meta_blkno));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (le64_to_cpu(super->first_meta_blkno) >
|
||||
le64_to_cpu(super->last_meta_blkno)) {
|
||||
scoutfs_err(sb, "super block first meta blkno %llu is greater than last meta blkno %llu",
|
||||
le64_to_cpu(super->first_meta_blkno),
|
||||
le64_to_cpu(super->last_meta_blkno));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (le64_to_cpu(super->first_data_blkno) >
|
||||
le64_to_cpu(super->last_data_blkno)) {
|
||||
scoutfs_err(sb, "super block first data blkno %llu is greater than last data blkno %llu",
|
||||
le64_to_cpu(super->first_data_blkno),
|
||||
le64_to_cpu(super->last_data_blkno));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
blkno = (i_size_read(sb->s_bdev->bd_inode) >>
|
||||
SCOUTFS_BLOCK_SM_SHIFT) - 1;
|
||||
if (le64_to_cpu(super->last_data_blkno) > blkno) {
|
||||
scoutfs_err(sb, "super block last data blkno %llu is outsite device size last blkno %llu",
|
||||
le64_to_cpu(super->last_data_blkno), blkno);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
@@ -599,8 +593,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
scoutfs_setup_trans(sb) ?:
|
||||
scoutfs_lock_setup(sb) ?:
|
||||
scoutfs_net_setup(sb) ?:
|
||||
scoutfs_quorum_setup(sb) ?:
|
||||
scoutfs_server_setup(sb) ?:
|
||||
scoutfs_quorum_setup(sb) ?:
|
||||
scoutfs_client_setup(sb) ?:
|
||||
scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
|
||||
&sbi->rid_lock) ?:
|
||||
@@ -682,6 +676,10 @@ static int __init scoutfs_module_init(void)
|
||||
".section .note.git_describe,\"a\"\n"
|
||||
".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
|
||||
".previous\n");
|
||||
__asm__ __volatile__ (
|
||||
".section .note.scoutfs_interop_version,\"a\"\n"
|
||||
".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
|
||||
".previous\n");
|
||||
|
||||
scoutfs_init_counters();
|
||||
|
||||
@@ -714,3 +712,4 @@ module_exit(scoutfs_module_exit)
|
||||
MODULE_AUTHOR("Zach Brown <zab@versity.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
|
||||
MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);
|
||||
|
||||
362
kmod/src/trans.c
362
kmod/src/trans.c
@@ -39,17 +39,15 @@
|
||||
* track the relationships between dirty blocks so there's only ever one
|
||||
* transaction being built.
|
||||
*
|
||||
* The copy of the on-disk super block in the fs sb info has its header
|
||||
* sequence advanced so that new dirty blocks inherit this dirty
|
||||
* sequence number. It's only advanced once all those dirty blocks are
|
||||
* reachable after having first written them all out and then the new
|
||||
* super with that seq. It's first incremented at mount.
|
||||
* Committing the current dirty transaction can be triggered by sync, a
|
||||
* regular background commit interval, reaching a dirty block threshold,
|
||||
* or the transaction running out of its private allocator resources.
|
||||
* Once all the current holders release the writing func writes out the
|
||||
* dirty blocks while excluding holders until it finishes.
|
||||
*
|
||||
* Unfortunately writers can nest. We don't bother trying to special
|
||||
* case holding a transaction that you're already holding because that
|
||||
* requires per-task storage. We just let anyone hold transactions
|
||||
* regardless of waiters waiting to write, which risks waiters waiting a
|
||||
* very long time.
|
||||
* Unfortunately writing holders can nest. We track nested hold callers
|
||||
* with the per-task journal_info pointer to avoid deadlocks between
|
||||
* holders that might otherwise wait for a pending commit.
|
||||
*/
|
||||
|
||||
/* sync dirty data at least this often */
|
||||
@@ -59,11 +57,7 @@
|
||||
* XXX move the rest of the super trans_ fields here.
|
||||
*/
|
||||
struct trans_info {
|
||||
spinlock_t lock;
|
||||
unsigned reserved_items;
|
||||
unsigned reserved_vals;
|
||||
unsigned holders;
|
||||
bool writing;
|
||||
atomic_t holders;
|
||||
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_alloc alloc;
|
||||
@@ -73,17 +67,9 @@ struct trans_info {
|
||||
#define DECLARE_TRANS_INFO(sb, name) \
|
||||
struct trans_info *name = SCOUTFS_SB(sb)->trans_info
|
||||
|
||||
static bool drained_holders(struct trans_info *tri)
|
||||
{
|
||||
bool drained;
|
||||
|
||||
spin_lock(&tri->lock);
|
||||
tri->writing = true;
|
||||
drained = tri->holders == 0;
|
||||
spin_unlock(&tri->lock);
|
||||
|
||||
return drained;
|
||||
}
|
||||
/* avoid the high sign bit out of an abundance of caution*/
|
||||
#define TRANS_HOLDERS_WRITE_FUNC_BIT (1 << 30)
|
||||
#define TRANS_HOLDERS_COUNT_MASK (TRANS_HOLDERS_WRITE_FUNC_BIT - 1)
|
||||
|
||||
static int commit_btrees(struct super_block *sb)
|
||||
{
|
||||
@@ -128,6 +114,36 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
|
||||
return scoutfs_block_writer_has_dirty(sb, &tri->wri);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is racing with wait_event conditions, make sure our atomic
|
||||
* stores and waitqueue loads are ordered.
|
||||
*/
|
||||
static void sub_holders_and_wake(struct super_block *sb, int val)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
atomic_sub(val, &tri->holders);
|
||||
smp_mb(); /* make sure sub is visible before we wake */
|
||||
if (waitqueue_active(&sbi->trans_hold_wq))
|
||||
wake_up(&sbi->trans_hold_wq);
|
||||
}
|
||||
|
||||
/*
|
||||
* called as a wait_event condition, needs to be careful to not change
|
||||
* task state and is racing with waking paths that sub_return, test, and
|
||||
* wake.
|
||||
*/
|
||||
static bool drained_holders(struct trans_info *tri)
|
||||
{
|
||||
int holders;
|
||||
|
||||
smp_mb(); /* make sure task in wait_event queue before atomic read */
|
||||
holders = atomic_read(&tri->holders) & TRANS_HOLDERS_COUNT_MASK;
|
||||
|
||||
return holders == 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work func is responsible for writing out all the dirty blocks
|
||||
* that make up the current dirty transaction. It prevents writers from
|
||||
@@ -164,6 +180,9 @@ void scoutfs_trans_write_func(struct work_struct *work)
|
||||
|
||||
sbi->trans_task = current;
|
||||
|
||||
/* mark that we're writing so holders wait for us to finish and clear our bit */
|
||||
atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);
|
||||
|
||||
wait_event(sbi->trans_hold_wq, drained_holders(tri));
|
||||
|
||||
trace_scoutfs_trans_write_func(sb,
|
||||
@@ -215,11 +234,8 @@ out:
|
||||
spin_unlock(&sbi->trans_write_lock);
|
||||
wake_up(&sbi->trans_write_wq);
|
||||
|
||||
spin_lock(&tri->lock);
|
||||
tri->writing = false;
|
||||
spin_unlock(&tri->lock);
|
||||
|
||||
wake_up(&sbi->trans_hold_wq);
|
||||
/* we're done, wake waiting holders */
|
||||
sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);
|
||||
|
||||
sbi->trans_task = NULL;
|
||||
|
||||
@@ -311,64 +327,83 @@ void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
|
||||
}
|
||||
|
||||
/*
|
||||
* Each thread reserves space in the segment for their dirty items while
|
||||
* they hold the transaction. This is calculated before the first
|
||||
* transaction hold is acquired. It includes all the potential nested
|
||||
* item manipulation that could happen with the transaction held.
|
||||
* Including nested holds avoids having to deal with writing out partial
|
||||
* transactions while a caller still holds the transaction.
|
||||
* We store nested holders in the lower bits of journal_info. We use
|
||||
* some higher bits as a magic value to detect if something goes
|
||||
* horribly wrong and it gets clobbered.
|
||||
*/
|
||||
#define SCOUTFS_RESERVATION_MAGIC 0xd57cd13b
|
||||
struct scoutfs_reservation {
|
||||
unsigned magic;
|
||||
unsigned holders;
|
||||
struct scoutfs_item_count reserved;
|
||||
struct scoutfs_item_count actual;
|
||||
};
|
||||
#define TRANS_JI_MAGIC 0xd5700000
|
||||
#define TRANS_JI_MAGIC_MASK 0xfff00000
|
||||
#define TRANS_JI_COUNT_MASK 0x000fffff
|
||||
|
||||
/* returns true if a caller already had a holder counted in journal_info */
|
||||
static bool inc_journal_info_holders(void)
|
||||
{
|
||||
unsigned long holders = (unsigned long)current->journal_info;
|
||||
|
||||
WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));
|
||||
|
||||
if (holders == 0)
|
||||
holders = TRANS_JI_MAGIC;
|
||||
holders++;
|
||||
|
||||
current->journal_info = (void *)holders;
|
||||
return (holders > (TRANS_JI_MAGIC | 1));
|
||||
}
|
||||
|
||||
static void dec_journal_info_holders(void)
|
||||
{
|
||||
unsigned long holders = (unsigned long)current->journal_info;
|
||||
|
||||
WARN_ON_ONCE(holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) != TRANS_JI_MAGIC));
|
||||
WARN_ON_ONCE((holders & TRANS_JI_COUNT_MASK) == 0);
|
||||
|
||||
holders--;
|
||||
if (holders == TRANS_JI_MAGIC)
|
||||
holders = 0;
|
||||
|
||||
current->journal_info = (void *)holders;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to hold the transaction. If a caller already holds the trans then
|
||||
* we piggy back on their hold. We wait if the writer is trying to
|
||||
* write out the transation. And if our items won't fit then we kick off
|
||||
* a write.
|
||||
* This is called as the wait_event condition for holding a transaction.
|
||||
* Increment the holder count unless the writer is present. We return
|
||||
* false to wait until the writer finishes and wakes us.
|
||||
*
|
||||
* This is called as a condition for wait_event. It is very limited in
|
||||
* the locking (blocking) it can do because the caller has set the task
|
||||
* state before testing the condition safely race with waking after
|
||||
* setting the condition. Our checking the amount of dirty metadata
|
||||
* blocks and free data blocks is racy, but we don't mind the risk of
|
||||
* delaying or prematurely forcing commits.
|
||||
* This can be racing with itself while there's no waiters. We retry
|
||||
* the cmpxchg instead of returning and waiting.
|
||||
*/
|
||||
static bool acquired_hold(struct super_block *sb,
|
||||
struct scoutfs_reservation *rsv,
|
||||
const struct scoutfs_item_count *cnt)
|
||||
static bool inc_holders_unless_writer(struct trans_info *tri)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
bool acquired = false;
|
||||
unsigned items;
|
||||
unsigned vals;
|
||||
int holders;
|
||||
|
||||
spin_lock(&tri->lock);
|
||||
do {
|
||||
smp_mb(); /* make sure we read after wait puts task in queue */
|
||||
holders = atomic_read(&tri->holders);
|
||||
if (holders & TRANS_HOLDERS_WRITE_FUNC_BIT)
|
||||
return false;
|
||||
|
||||
trace_scoutfs_trans_acquired_hold(sb, cnt, rsv, rsv->holders,
|
||||
&rsv->reserved, &rsv->actual,
|
||||
tri->holders, tri->writing,
|
||||
tri->reserved_items,
|
||||
tri->reserved_vals);
|
||||
} while (atomic_cmpxchg(&tri->holders, holders, holders + 1) != holders);
|
||||
|
||||
/* use a caller's existing reservation */
|
||||
if (rsv->holders)
|
||||
goto hold;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* wait until the writing thread is finished */
|
||||
if (tri->writing)
|
||||
goto out;
|
||||
|
||||
/* see if we can reserve space for our item count */
|
||||
items = tri->reserved_items + cnt->items;
|
||||
vals = tri->reserved_vals + cnt->vals;
|
||||
/*
|
||||
* As we drop the last trans holder we try to wake a writing thread that
|
||||
* was waiting for us to finish.
|
||||
*/
|
||||
static void release_holders(struct super_block *sb)
|
||||
{
|
||||
dec_journal_info_holders();
|
||||
sub_holders_and_wake(sb, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller has incremented holders so it is blocking commits. We
|
||||
* make some quick checks to see if we need to trigger and wait for
|
||||
* another commit before proceeding.
|
||||
*/
|
||||
static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
|
||||
{
|
||||
/*
|
||||
* In theory each dirty item page could be straddling two full
|
||||
* blocks, requiring 4 allocations for each item cache page.
|
||||
@@ -378,11 +413,9 @@ static bool acquired_hold(struct super_block *sb,
|
||||
* that it accounts for having to dirty parent blocks and
|
||||
* whatever dirtying is done during the transaction hold.
|
||||
*/
|
||||
if (scoutfs_alloc_meta_low(sb, &tri->alloc,
|
||||
scoutfs_item_dirty_pages(sb) * 2)) {
|
||||
if (scoutfs_alloc_meta_low(sb, &tri->alloc, scoutfs_item_dirty_pages(sb) * 2)) {
|
||||
scoutfs_inc_counter(sb, trans_commit_dirty_meta_full);
|
||||
queue_trans_work(sbi);
|
||||
goto out;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -394,71 +427,74 @@ static bool acquired_hold(struct super_block *sb,
|
||||
*/
|
||||
if (scoutfs_alloc_meta_low(sb, &tri->alloc, 16)) {
|
||||
scoutfs_inc_counter(sb, trans_commit_meta_alloc_low);
|
||||
queue_trans_work(sbi);
|
||||
goto out;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Try to refill data allocator before premature enospc */
|
||||
if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
|
||||
scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
|
||||
queue_trans_work(sbi);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool acquired_hold(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
bool acquired;
|
||||
|
||||
/* if a caller already has a hold we acquire unconditionally */
|
||||
if (inc_journal_info_holders()) {
|
||||
atomic_inc(&tri->holders);
|
||||
acquired = true;
|
||||
goto out;
|
||||
}
|
||||
|
||||
tri->reserved_items = items;
|
||||
tri->reserved_vals = vals;
|
||||
/* wait if the writer is blocking holds */
|
||||
if (!inc_holders_unless_writer(tri)) {
|
||||
dec_journal_info_holders();
|
||||
acquired = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rsv->reserved.items = cnt->items;
|
||||
rsv->reserved.vals = cnt->vals;
|
||||
/* wait if we're triggering another commit */
|
||||
if (commit_before_hold(sb, tri)) {
|
||||
release_holders(sb);
|
||||
queue_trans_work(sbi);
|
||||
acquired = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
hold:
|
||||
rsv->holders++;
|
||||
tri->holders++;
|
||||
trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
|
||||
acquired = true;
|
||||
|
||||
out:
|
||||
|
||||
spin_unlock(&tri->lock);
|
||||
|
||||
return acquired;
|
||||
}
|
||||
|
||||
int scoutfs_hold_trans(struct super_block *sb,
|
||||
const struct scoutfs_item_count cnt)
|
||||
/*
|
||||
* Try to hold the transaction. Holding the transaction prevents it
|
||||
* from being committed. If a transaction is currently being written
|
||||
* then we'll block until it's done and our hold can be granted.
|
||||
*
|
||||
* If a caller already holds the trans then we unconditionally acquire
|
||||
* our hold and return to avoid deadlocks with our caller, the writing
|
||||
* thread, and us. We record nested holds in a call stack with the
|
||||
* journal_info pointer in the task_struct.
|
||||
*
|
||||
* The writing thread marks itself as a global trans_task which
|
||||
* short-circuits all the hold machinery so it can call code that would
|
||||
* otherwise try to hold transactions while it is writing.
|
||||
*/
|
||||
int scoutfs_hold_trans(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_reservation *rsv;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Caller shouldn't provide garbage counts, nor counts that
|
||||
* can't fit in segments by themselves.
|
||||
*/
|
||||
if (WARN_ON_ONCE(cnt.items <= 0 || cnt.vals < 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
return 0;
|
||||
|
||||
rsv = current->journal_info;
|
||||
if (rsv == NULL) {
|
||||
rsv = kzalloc(sizeof(struct scoutfs_reservation), GFP_NOFS);
|
||||
if (!rsv)
|
||||
return -ENOMEM;
|
||||
|
||||
rsv->magic = SCOUTFS_RESERVATION_MAGIC;
|
||||
current->journal_info = rsv;
|
||||
}
|
||||
|
||||
BUG_ON(rsv->magic != SCOUTFS_RESERVATION_MAGIC);
|
||||
|
||||
ret = wait_event_interruptible(sbi->trans_hold_wq,
|
||||
acquired_hold(sb, rsv, &cnt));
|
||||
if (ret && rsv->holders == 0) {
|
||||
current->journal_info = NULL;
|
||||
kfree(rsv);
|
||||
}
|
||||
return ret;
|
||||
return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -468,86 +504,22 @@ int scoutfs_hold_trans(struct super_block *sb,
|
||||
*/
|
||||
bool scoutfs_trans_held(void)
|
||||
{
|
||||
struct scoutfs_reservation *rsv = current->journal_info;
|
||||
unsigned long holders = (unsigned long)current->journal_info;
|
||||
|
||||
return rsv && rsv->magic == SCOUTFS_RESERVATION_MAGIC;
|
||||
return (holders != 0 && ((holders & TRANS_JI_MAGIC_MASK) == TRANS_JI_MAGIC));
|
||||
}
|
||||
|
||||
/*
|
||||
* Record a transaction holder's individual contribution to the dirty
|
||||
* items in the current transaction. We're making sure that the
|
||||
* reservation matches the possible item manipulations while they hold
|
||||
* the reservation.
|
||||
*
|
||||
* It is possible and legitimate for an individual contribution to be
|
||||
* negative if they delete dirty items. The item cache makes sure that
|
||||
* the total dirty item count doesn't fall below zero.
|
||||
*/
|
||||
void scoutfs_trans_track_item(struct super_block *sb, signed items,
|
||||
signed vals)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_reservation *rsv = current->journal_info;
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
return;
|
||||
|
||||
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
|
||||
|
||||
rsv->actual.items += items;
|
||||
rsv->actual.vals += vals;
|
||||
|
||||
trace_scoutfs_trans_track_item(sb, items, vals, rsv->actual.items,
|
||||
rsv->actual.vals, rsv->reserved.items,
|
||||
rsv->reserved.vals);
|
||||
|
||||
WARN_ON_ONCE(rsv->actual.items > rsv->reserved.items);
|
||||
WARN_ON_ONCE(rsv->actual.vals > rsv->reserved.vals);
|
||||
}
|
||||
|
||||
/*
|
||||
* As we drop the last hold in the reservation we try and wake other
|
||||
* hold attempts that were waiting for space. As we drop the last trans
|
||||
* holder we try to wake a writing thread that was waiting for us to
|
||||
* finish.
|
||||
*/
|
||||
void scoutfs_release_trans(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_reservation *rsv;
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
bool wake = false;
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
return;
|
||||
|
||||
rsv = current->journal_info;
|
||||
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
|
||||
release_holders(sb);
|
||||
|
||||
spin_lock(&tri->lock);
|
||||
|
||||
trace_scoutfs_release_trans(sb, rsv, rsv->holders, &rsv->reserved,
|
||||
&rsv->actual, tri->holders, tri->writing,
|
||||
tri->reserved_items, tri->reserved_vals);
|
||||
|
||||
BUG_ON(rsv->holders <= 0);
|
||||
BUG_ON(tri->holders <= 0);
|
||||
|
||||
if (--rsv->holders == 0) {
|
||||
tri->reserved_items -= rsv->reserved.items;
|
||||
tri->reserved_vals -= rsv->reserved.vals;
|
||||
current->journal_info = NULL;
|
||||
kfree(rsv);
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if (--tri->holders == 0)
|
||||
wake = true;
|
||||
|
||||
spin_unlock(&tri->lock);
|
||||
|
||||
if (wake)
|
||||
wake_up(&sbi->trans_hold_wq);
|
||||
trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -576,7 +548,7 @@ int scoutfs_setup_trans(struct super_block *sb)
|
||||
if (!tri)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&tri->lock);
|
||||
atomic_set(&tri->holders, 0);
|
||||
scoutfs_block_writer_init(sb, &tri->wri);
|
||||
|
||||
sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
|
||||
|
||||
@@ -6,21 +6,16 @@
|
||||
/* the client will force commits if data allocators get too low */
|
||||
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)
|
||||
|
||||
#include "count.h"
|
||||
|
||||
void scoutfs_trans_write_func(struct work_struct *work);
|
||||
int scoutfs_trans_sync(struct super_block *sb, int wait);
|
||||
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
|
||||
int datasync);
|
||||
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);
|
||||
|
||||
int scoutfs_hold_trans(struct super_block *sb,
|
||||
const struct scoutfs_item_count cnt);
|
||||
int scoutfs_hold_trans(struct super_block *sb);
|
||||
bool scoutfs_trans_held(void);
|
||||
void scoutfs_release_trans(struct super_block *sb);
|
||||
u64 scoutfs_trans_sample_seq(struct super_block *sb);
|
||||
void scoutfs_trans_track_item(struct super_block *sb, signed items,
|
||||
signed vals);
|
||||
|
||||
int scoutfs_trans_get_log_trees(struct super_block *sb);
|
||||
bool scoutfs_trans_has_dirty(struct super_block *sb);
|
||||
|
||||
@@ -38,10 +38,7 @@ struct scoutfs_triggers {
|
||||
struct scoutfs_triggers *name = SCOUTFS_SB(sb)->triggers
|
||||
|
||||
static char *names[] = {
|
||||
[SCOUTFS_TRIGGER_BTREE_STALE_READ] = "btree_stale_read",
|
||||
[SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF] = "btree_advance_ring_half",
|
||||
[SCOUTFS_TRIGGER_HARD_STALE_ERROR] = "hard_stale_error",
|
||||
[SCOUTFS_TRIGGER_SEG_STALE_READ] = "seg_stale_read",
|
||||
[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
|
||||
[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
|
||||
};
|
||||
|
||||
|
||||
@@ -2,10 +2,7 @@
|
||||
#define _SCOUTFS_TRIGGERS_H_
|
||||
|
||||
enum scoutfs_trigger {
|
||||
SCOUTFS_TRIGGER_BTREE_STALE_READ,
|
||||
SCOUTFS_TRIGGER_BTREE_ADVANCE_RING_HALF,
|
||||
SCOUTFS_TRIGGER_HARD_STALE_ERROR,
|
||||
SCOUTFS_TRIGGER_SEG_STALE_READ,
|
||||
SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
|
||||
SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
|
||||
SCOUTFS_TRIGGER_NR,
|
||||
};
|
||||
|
||||
20
kmod/src/util.h
Normal file
20
kmod/src/util.h
Normal file
@@ -0,0 +1,20 @@
|
||||
#ifndef _SCOUTFS_UTIL_H_
|
||||
#define _SCOUTFS_UTIL_H_
|
||||
|
||||
/*
|
||||
* Little utility helpers that probably belong upstream.
|
||||
*/
|
||||
|
||||
static inline void down_write_two(struct rw_semaphore *a,
|
||||
struct rw_semaphore *b)
|
||||
{
|
||||
BUG_ON(a == b);
|
||||
|
||||
if (a > b)
|
||||
swap(a, b);
|
||||
|
||||
down_write(a);
|
||||
down_write_nested(b, SINGLE_DEPTH_NESTING);
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -577,10 +577,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
|
||||
retry:
|
||||
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
|
||||
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
|
||||
SIC_XATTR_SET(found_parts,
|
||||
value != NULL,
|
||||
name_len, size));
|
||||
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
|
||||
if (ret > 0)
|
||||
goto retry;
|
||||
if (ret)
|
||||
@@ -781,7 +778,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
|
||||
&tgs) != 0)
|
||||
memset(&tgs, 0, sizeof(tgs));
|
||||
|
||||
ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
|
||||
ret = scoutfs_hold_trans(sb);
|
||||
if (ret < 0)
|
||||
break;
|
||||
release = true;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing
|
||||
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -I ../kmod/src
|
||||
SHELL := /usr/bin/bash
|
||||
|
||||
# each binary command is built from a single .c file
|
||||
@@ -6,6 +6,7 @@ BIN := src/createmany \
|
||||
src/dumb_setxattr \
|
||||
src/handle_cat \
|
||||
src/bulk_create_paths \
|
||||
src/stage_tmpfile \
|
||||
src/find_xattrs
|
||||
|
||||
DEPS := $(wildcard src/*.d)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
t_filter_fs()
|
||||
{
|
||||
sed -e 's@mnt/test\.[0-9]*@mnt/test@g' \
|
||||
-e 's@Device: [a-fA-F0-7]*h/[0-9]*d@Device: 0h/0d@g'
|
||||
-e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
|
||||
}
|
||||
|
||||
#
|
||||
@@ -59,5 +59,8 @@ t_filter_dmesg()
|
||||
# some tests mount w/o options
|
||||
re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
|
||||
|
||||
# in debugging kernels we can slow things down a bit
|
||||
re="$re|hrtimer: interrupt took .*"
|
||||
|
||||
egrep -v "($re)"
|
||||
}
|
||||
|
||||
@@ -28,8 +28,8 @@ t_ident()
|
||||
local fsid
|
||||
local rid
|
||||
|
||||
fsid=$(scoutfs statfs -s fsid "$mnt")
|
||||
rid=$(scoutfs statfs -s rid "$mnt")
|
||||
fsid=$(scoutfs statfs -s fsid -p "$mnt")
|
||||
rid=$(scoutfs statfs -s rid -p "$mnt")
|
||||
|
||||
echo "f.${fsid:0:6}.r.${rid:0:6}"
|
||||
}
|
||||
@@ -99,6 +99,19 @@ t_first_client_nr()
|
||||
t_fail "t_first_client_nr didn't find any clients"
|
||||
}
|
||||
|
||||
#
|
||||
# The number of quorum members needed to form a majority to start the
|
||||
# server.
|
||||
#
|
||||
t_majority_count()
|
||||
{
|
||||
if [ "$T_QUORUM" -lt 3 ]; then
|
||||
echo 1
|
||||
else
|
||||
echo $(((T_QUORUM / 2) + 1))
|
||||
fi
|
||||
}
|
||||
|
||||
t_mount()
|
||||
{
|
||||
local nr="$1"
|
||||
@@ -116,7 +129,7 @@ t_umount()
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval t_quiet umount \$T_DB$i
|
||||
eval t_quiet umount \$T_M$i
|
||||
}
|
||||
|
||||
#
|
||||
@@ -196,12 +209,19 @@ t_trigger_show() {
|
||||
echo "trigger $which $string: $(t_trigger_get $which $nr)"
|
||||
}
|
||||
|
||||
t_trigger_arm() {
|
||||
t_trigger_arm_silent() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
local path=$(t_trigger_path "$nr")
|
||||
|
||||
echo 1 > "$path/$which"
|
||||
}
|
||||
|
||||
t_trigger_arm() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
|
||||
t_trigger_arm_silent $which $nr
|
||||
t_trigger_show $which armed $nr
|
||||
}
|
||||
|
||||
@@ -216,16 +236,44 @@ t_counter() {
|
||||
cat "$(t_sysfs_path $nr)/counters/$which"
|
||||
}
|
||||
|
||||
#
|
||||
# output the difference between the current value of a counter and the
|
||||
# caller's provided previous value.
|
||||
#
|
||||
t_counter_diff_value() {
|
||||
local which="$1"
|
||||
local old="$2"
|
||||
local nr="$3"
|
||||
local new="$(t_counter $which $nr)"
|
||||
|
||||
echo "$((new - old))"
|
||||
}
|
||||
|
||||
#
|
||||
# output the value of the given counter for the given mount, defaulting
|
||||
# to mount 0 if a mount isn't specified.
|
||||
# to mount 0 if a mount isn't specified. For tests which expect a
|
||||
# specific difference in counters.
|
||||
#
|
||||
t_counter_diff() {
|
||||
local which="$1"
|
||||
local old="$2"
|
||||
local nr="$3"
|
||||
local new
|
||||
|
||||
new="$(t_counter $which $nr)"
|
||||
echo "counter $which diff $((new - old))"
|
||||
echo "counter $which diff $(t_counter_diff_value $which $old $nr)"
|
||||
}
|
||||
|
||||
#
|
||||
# output a message indicating whether or not the counter value changed.
|
||||
# For tests that expect a difference, or not, but the amount of
|
||||
# difference isn't significant.
|
||||
#
|
||||
t_counter_diff_changed() {
|
||||
local which="$1"
|
||||
local old="$2"
|
||||
local nr="$3"
|
||||
local diff="$(t_counter_diff_value $which $old $nr)"
|
||||
|
||||
test "$diff" -eq 0 && \
|
||||
echo "counter $which didn't change" ||
|
||||
echo "counter $which changed"
|
||||
}
|
||||
|
||||
@@ -21,5 +21,20 @@ t_require_mounts() {
|
||||
local req="$1"
|
||||
|
||||
test "$T_NR_MOUNTS" -ge "$req" || \
|
||||
t_fail "$req mounts required, only have $T_NR_MOUNTS"
|
||||
t_skip "$req mounts required, only have $T_NR_MOUNTS"
|
||||
}
|
||||
|
||||
#
|
||||
# Require that the meta device be at least the size string argument, as
|
||||
# parsed by numfmt using single char base 2 suffixes (iec).. 64G, etc.
|
||||
#
|
||||
t_require_meta_size() {
|
||||
local dev="$T_META_DEVICE"
|
||||
local req_iec="$1"
|
||||
local req_bytes=$(numfmt --from=iec --to=none $req_iec)
|
||||
local dev_bytes=$(blockdev --getsize64 $dev)
|
||||
local dev_iec=$(numfmt --from=auto --to=iec $dev_bytes)
|
||||
|
||||
test "$dev_bytes" -ge "$req_bytes" || \
|
||||
t_skip "$dev must be at least $req_iec, is $dev_iec"
|
||||
}
|
||||
|
||||
52
tests/golden/block-stale-reads
Normal file
52
tests/golden/block-stale-reads
Normal file
@@ -0,0 +1,52 @@
|
||||
== create shared test file
|
||||
== set and get xattrs between mount pairs while retrying
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="1"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="2"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="3"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="4"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="5"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="6"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="7"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="8"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="9"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="10"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
3
tests/golden/mount-unmount-race
Normal file
3
tests/golden/mount-unmount-race
Normal file
@@ -0,0 +1,3 @@
|
||||
== create per mount files
|
||||
== 30s of racing random mount/umount
|
||||
== mounting any unmounted
|
||||
33
tests/golden/move-blocks
Normal file
33
tests/golden/move-blocks
Normal file
@@ -0,0 +1,33 @@
|
||||
== build test files
|
||||
== wrapped offsets should fail
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Value too large for defined data type (75)
|
||||
scoutfs: move-blocks failed: Value too large for defined data type (75)
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Value too large for defined data type (75)
|
||||
scoutfs: move-blocks failed: Value too large for defined data type (75)
|
||||
== specifying same file fails
|
||||
ioctl failed on '/mnt/test/test/move-blocks/hardlink': Invalid argument (22)
|
||||
scoutfs: move-blocks failed: Invalid argument (22)
|
||||
== specifying files in other file systems fails
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Invalid cross-device link (18)
|
||||
scoutfs: move-blocks failed: Invalid cross-device link (18)
|
||||
== offsets must be multiples of 4KB
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Invalid argument (22)
|
||||
scoutfs: move-blocks failed: Invalid argument (22)
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Invalid argument (22)
|
||||
scoutfs: move-blocks failed: Invalid argument (22)
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Invalid argument (22)
|
||||
scoutfs: move-blocks failed: Invalid argument (22)
|
||||
== can't move onto existing extent
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': Invalid argument (22)
|
||||
scoutfs: move-blocks failed: Invalid argument (22)
|
||||
== can't move between files with offline extents
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': No data available (61)
|
||||
scoutfs: move-blocks failed: No data available (61)
|
||||
ioctl failed on '/mnt/test/test/move-blocks/to': No data available (61)
|
||||
scoutfs: move-blocks failed: No data available (61)
|
||||
== basic moves work
|
||||
== moving final partial block sets partial i_size
|
||||
123
|
||||
== moving updates inode fields
|
||||
== moving blocks backwards works
|
||||
== combine many files into one
|
||||
@@ -1,6 +1,6 @@
|
||||
== create files
|
||||
== waiter shows up in ioctl
|
||||
offline wating should be empty:
|
||||
offline waiting should be empty:
|
||||
0
|
||||
offline waiting should now have one known entry:
|
||||
== multiple waiters on same block listed once
|
||||
@@ -8,7 +8,7 @@ offline waiting still has one known entry:
|
||||
== different blocks show up
|
||||
offline waiting now has two known entries:
|
||||
== staging wakes everyone
|
||||
offline wating should be empty again:
|
||||
offline waiting should be empty again:
|
||||
0
|
||||
== interruption does no harm
|
||||
offline waiting should now have one known entry:
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
== 0 data_version arg fails
|
||||
setattr_more ioctl failed on '/mnt/test/test/setattr_more/file': Invalid argument (22)
|
||||
scoutfs: setattr failed: Invalid argument (22)
|
||||
setattr: data version must not be 0
|
||||
Try `setattr --help' or `setattr --usage' for more information.
|
||||
== args must specify size and offline
|
||||
setattr_more ioctl failed on '/mnt/test/test/setattr_more/file': Invalid argument (22)
|
||||
scoutfs: setattr failed: Invalid argument (22)
|
||||
setattr: must provide size if using --offline option
|
||||
Try `setattr --help' or `setattr --usage' for more information.
|
||||
== only works on regular files
|
||||
failed to open '/mnt/test/test/setattr_more/dir': Is a directory (21)
|
||||
scoutfs: setattr failed: Is a directory (21)
|
||||
|
||||
@@ -8,16 +8,16 @@
|
||||
release ioctl failed: Invalid argument (22)
|
||||
scoutfs: release failed: Invalid argument (22)
|
||||
== releasing non-file fails
|
||||
ioctl failed on '/mnt/test/test/simple-release-extents/file-char': Inappropriate ioctl for device (25)
|
||||
release ioctl failed: Inappropriate ioctl for device (25)
|
||||
scoutfs: release failed: Inappropriate ioctl for device (25)
|
||||
ioctl failed: Inappropriate ioctl for device (25)
|
||||
release: must provide file version --data-version
|
||||
Try `release --help' or `release --usage' for more information.
|
||||
== releasing a non-scoutfs file fails
|
||||
ioctl failed on '/dev/null': Inappropriate ioctl for device (25)
|
||||
release ioctl failed: Inappropriate ioctl for device (25)
|
||||
scoutfs: release failed: Inappropriate ioctl for device (25)
|
||||
ioctl failed: Inappropriate ioctl for device (25)
|
||||
release: must provide file version --data-version
|
||||
Try `release --help' or `release --usage' for more information.
|
||||
== releasing bad version fails
|
||||
release ioctl failed: Stale file handle (116)
|
||||
scoutfs: release failed: Stale file handle (116)
|
||||
release: must provide file version --data-version
|
||||
Try `release --help' or `release --usage' for more information.
|
||||
== verify small release merging
|
||||
0 0 0: (0 0 1) (1 101 4)
|
||||
0 0 1: (0 0 2) (2 102 3)
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
== release+stage shouldn't change stat, data seq or vers
|
||||
== stage does change meta_seq
|
||||
== can't use stage to extend online file
|
||||
stage returned -1, not 4096: error Invalid argument (22)
|
||||
scoutfs: stage failed: Input/output error (5)
|
||||
stage: must provide file version with --data-version
|
||||
Try `stage --help' or `stage --usage' for more information.
|
||||
== wrapped region fails
|
||||
stage returned -1, not 4096: error Invalid argument (22)
|
||||
scoutfs: stage failed: Input/output error (5)
|
||||
@@ -18,6 +18,6 @@ scoutfs: stage failed: Input/output error (5)
|
||||
== partial final block that writes to i_size does work
|
||||
== zero length stage doesn't bring blocks online
|
||||
== stage of non-regular file fails
|
||||
ioctl failed on '/mnt/test/test/simple-staging/file-char': Inappropriate ioctl for device (25)
|
||||
stage returned -1, not 1: error Inappropriate ioctl for device (25)
|
||||
scoutfs: stage failed: Input/output error (5)
|
||||
ioctl failed: Inappropriate ioctl for device (25)
|
||||
stage: must provide file version with --data-version
|
||||
Try `stage --help' or `stage --usage' for more information.
|
||||
|
||||
0
tests/golden/stage-multi-part
Normal file
0
tests/golden/stage-multi-part
Normal file
18
tests/golden/stage-tmpfile
Normal file
18
tests/golden/stage-tmpfile
Normal file
@@ -0,0 +1,18 @@
|
||||
total file size 33669120
|
||||
00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 |AAAAAAAAAAAAAAAA|
|
||||
*
|
||||
00400000 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 |BBBBBBBBBBBBBBBB|
|
||||
*
|
||||
00801000 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 |CCCCCCCCCCCCCCCC|
|
||||
*
|
||||
00c03000 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 |DDDDDDDDDDDDDDDD|
|
||||
*
|
||||
01006000 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 |EEEEEEEEEEEEEEEE|
|
||||
*
|
||||
0140a000 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 |FFFFFFFFFFFFFFFF|
|
||||
*
|
||||
0180f000 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 |GGGGGGGGGGGGGGGG|
|
||||
*
|
||||
01c15000 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 |HHHHHHHHHHHHHHHH|
|
||||
*
|
||||
0201c000
|
||||
@@ -1,11 +0,0 @@
|
||||
== create file for xattr ping pong
|
||||
# file: /mnt/test/test/stale-btree-read/file
|
||||
user.xat="initial"
|
||||
|
||||
== retry btree block read
|
||||
trigger btree_stale_read armed: 1
|
||||
# file: /mnt/test/test/stale-btree-read/file
|
||||
user.xat="btree"
|
||||
|
||||
trigger btree_stale_read after: 0
|
||||
counter btree_stale_read diff 1
|
||||
@@ -1,6 +1,7 @@
|
||||
Ran:
|
||||
generic/001
|
||||
generic/002
|
||||
generic/004
|
||||
generic/005
|
||||
generic/006
|
||||
generic/007
|
||||
@@ -73,7 +74,6 @@ generic/376
|
||||
generic/377
|
||||
Not
|
||||
run:
|
||||
generic/004
|
||||
generic/008
|
||||
generic/009
|
||||
generic/012
|
||||
@@ -278,4 +278,4 @@ shared/004
|
||||
shared/032
|
||||
shared/051
|
||||
shared/289
|
||||
Passed all 72 tests
|
||||
Passed all 73 tests
|
||||
|
||||
@@ -52,16 +52,17 @@ $(basename $0) options:
|
||||
| the file system to be tested. Will be clobbered by -m mkfs.
|
||||
-m | Run mkfs on the device before mounting and running
|
||||
| tests. Implies unmounting existing mounts first.
|
||||
-n | The number of devices and mounts to test.
|
||||
-P | Output trace events with printk as they're generated.
|
||||
-n <nr> | The number of devices and mounts to test.
|
||||
-P | Enable trace_printk.
|
||||
-p | Exit script after preparing mounts only, don't run tests.
|
||||
-q <nr> | Specify the quorum count needed to mount. This is
|
||||
| used when running mkfs and is needed by a few tests.
|
||||
-q <nr> | The first <nr> mounts will be quorum members. Must be
|
||||
| at least 1 and no greater than -n number of mounts.
|
||||
-r <dir> | Specify the directory in which to store results of
|
||||
| test runs. The directory will be created if it doesn't
|
||||
| exist. Previous results will be deleted as each test runs.
|
||||
-s | Skip git repo checkouts.
|
||||
-t | Enabled trace events that match the given glob argument.
|
||||
| Multiple options enable multiple globbed events.
|
||||
-X | xfstests git repo. Used by tests/xfstests.sh.
|
||||
-x | xfstests git branch to checkout and track.
|
||||
-y | xfstests ./check additional args
|
||||
@@ -77,6 +78,9 @@ done
|
||||
T_TRACE_DUMP="0"
|
||||
T_TRACE_PRINTK="0"
|
||||
|
||||
# array declarations to be able to use array ops
|
||||
declare -a T_TRACE_GLOB
|
||||
|
||||
while true; do
|
||||
case $1 in
|
||||
-a)
|
||||
@@ -147,7 +151,7 @@ while true; do
|
||||
;;
|
||||
-t)
|
||||
test -n "$2" || die "-t must have trace glob argument"
|
||||
T_TRACE_GLOB="$2"
|
||||
T_TRACE_GLOB+=("$2")
|
||||
shift
|
||||
;;
|
||||
-X)
|
||||
@@ -195,7 +199,6 @@ test -e "$T_EX_META_DEV" || die "extra meta device -f '$T_EX_META_DEV' doesn't e
|
||||
test -n "$T_EX_DATA_DEV" || die "must specify -e extra data device"
|
||||
test -e "$T_EX_DATA_DEV" || die "extra data device -e '$T_EX_DATA_DEV' doesn't exist"
|
||||
|
||||
test -n "$T_MKFS" -a -z "$T_QUORUM" && die "mkfs (-m) requires quorum (-q)"
|
||||
test -n "$T_RESULTS" || die "must specify -r results dir"
|
||||
test -n "$T_XFSTESTS_REPO" -a -z "$T_XFSTESTS_BRANCH" -a -z "$T_SKIP_CHECKOUT" && \
|
||||
die "-X xfstests repo requires -x xfstests branch"
|
||||
@@ -205,6 +208,12 @@ test -n "$T_XFSTESTS_BRANCH" -a -z "$T_XFSTESTS_REPO" -a -z "$T_SKIP_CHECKOUT" &
|
||||
test -n "$T_NR_MOUNTS" || die "must specify -n nr mounts"
|
||||
test "$T_NR_MOUNTS" -ge 1 -a "$T_NR_MOUNTS" -le 8 || \
|
||||
die "-n nr mounts must be >= 1 and <= 8"
|
||||
test -n "$T_QUORUM" || \
|
||||
die "must specify -q number of mounts that are quorum members"
|
||||
test "$T_QUORUM" -ge "1" || \
|
||||
die "-q quorum mmembers must be at least 1"
|
||||
test "$T_QUORUM" -le "$T_NR_MOUNTS" || \
|
||||
die "-q quorum mmembers must not be greater than -n mounts"
|
||||
|
||||
# top level paths
|
||||
T_KMOD=$(realpath "$(dirname $0)/../kmod")
|
||||
@@ -303,8 +312,14 @@ if [ -n "$T_UNMOUNT" ]; then
|
||||
unmount_all
|
||||
fi
|
||||
|
||||
quo=""
|
||||
if [ -n "$T_MKFS" ]; then
|
||||
cmd scoutfs mkfs -Q "$T_QUORUM" "$T_META_DEVICE" "$T_DATA_DEVICE"
|
||||
for i in $(seq -0 $((T_QUORUM - 1))); do
|
||||
quo="$quo -Q $i,127.0.0.1,$((42000 + i))"
|
||||
done
|
||||
|
||||
msg "making new filesystem with $T_QUORUM quorum members"
|
||||
cmd scoutfs mkfs -f $quo "$T_META_DEVICE" "$T_DATA_DEVICE"
|
||||
fi
|
||||
|
||||
if [ -n "$T_INSMOD" ]; then
|
||||
@@ -314,23 +329,37 @@ if [ -n "$T_INSMOD" ]; then
|
||||
cmd insmod "$T_KMOD/src/scoutfs.ko"
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_GLOB" ]; then
|
||||
msg "enabling trace events"
|
||||
nr_globs=${#T_TRACE_GLOB[@]}
|
||||
if [ $nr_globs -gt 0 ]; then
|
||||
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
|
||||
for g in $T_TRACE_GLOB; do
|
||||
|
||||
for g in "${T_TRACE_GLOB[@]}"; do
|
||||
for e in /sys/kernel/debug/tracing/events/scoutfs/$g/enable; do
|
||||
echo 1 > $e
|
||||
if test -w "$e"; then
|
||||
echo 1 > "$e"
|
||||
else
|
||||
die "-t glob '$g' matched no scoutfs events"
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
echo "$T_TRACE_DUMP" > /proc/sys/kernel/ftrace_dump_on_oops
|
||||
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
|
||||
|
||||
cmd cat /sys/kernel/debug/tracing/set_event
|
||||
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
|
||||
/proc/sys/kernel/ftrace_dump_on_oops
|
||||
nr_events=$(cat /sys/kernel/debug/tracing/set_event | wc -l)
|
||||
msg "enabled $nr_events trace events from $nr_globs -t globs"
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_PRINTK" ]; then
|
||||
echo "$T_TRACE_PRINTK" > /sys/kernel/debug/tracing/options/trace_printk
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_DUMP" ]; then
|
||||
echo "$T_TRACE_DUMP" > /proc/sys/kernel/ftrace_dump_on_oops
|
||||
fi
|
||||
|
||||
# always describe tracing in the logs
|
||||
cmd cat /sys/kernel/debug/tracing/set_event
|
||||
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
|
||||
/proc/sys/kernel/ftrace_dump_on_oops
|
||||
|
||||
#
|
||||
# mount concurrently so that a quorum is present to elect the leader and
|
||||
# start a server.
|
||||
@@ -347,8 +376,12 @@ for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="/mnt/test.$i"
|
||||
test -d "$dir" || cmd mkdir -p "$dir"
|
||||
|
||||
opts="-o metadev_path=$meta_dev"
|
||||
if [ "$i" -lt "$T_QUORUM" ]; then
|
||||
opts="$opts,quorum_slot_nr=$i"
|
||||
fi
|
||||
|
||||
msg "mounting $meta_dev|$data_dev on $dir"
|
||||
opts="-o server_addr=127.0.0.1,metadev_path=$meta_dev"
|
||||
cmd mount -t scoutfs $opts "$data_dev" "$dir" &
|
||||
|
||||
p="$!"
|
||||
@@ -434,7 +467,7 @@ for t in $tests; do
|
||||
|
||||
# get stats from previous pass
|
||||
last="$T_RESULTS/last-passed-test-stats"
|
||||
stats=$(grep -s "^$test_name" "$last" | cut -d " " -f 2-)
|
||||
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
|
||||
test -n "$stats" && stats="last: $stats"
|
||||
|
||||
printf " %-30s $stats" "$test_name"
|
||||
@@ -497,7 +530,7 @@ for t in $tests; do
|
||||
echo " passed: $stats"
|
||||
((passed++))
|
||||
# save stats for passed test
|
||||
grep -s -v "^$test_name" "$last" > "$last.tmp"
|
||||
grep -s -v "^$test_name " "$last" > "$last.tmp"
|
||||
echo "$test_name $stats" >> "$last.tmp"
|
||||
mv -f "$last.tmp" "$last"
|
||||
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
|
||||
@@ -515,23 +548,24 @@ done
|
||||
|
||||
msg "all tests run: $passed passed, $skipped skipped, $failed failed"
|
||||
|
||||
unmount_all
|
||||
|
||||
if [ -n "$T_TRACE_GLOB" ]; then
|
||||
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
|
||||
msg "saving traces and disabling tracing"
|
||||
echo 0 > /sys/kernel/debug/tracing/events/scoutfs/enable
|
||||
echo 0 > /sys/kernel/debug/tracing/options/trace_printk
|
||||
cat /sys/kernel/debug/tracing/trace > "$T_RESULTS/traces"
|
||||
fi
|
||||
|
||||
if [ "$skipped" == 0 -a "$failed" == 0 ]; then
|
||||
msg "all tests passed"
|
||||
unmount_all
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$skipped" != 0 ]; then
|
||||
msg "$skipped tests skipped, check skip.log"
|
||||
msg "$skipped tests skipped, check skip.log, still mounted"
|
||||
fi
|
||||
if [ "$failed" != 0 ]; then
|
||||
msg "$failed tests failed, check fail.log"
|
||||
msg "$failed tests failed, check fail.log, still mounted"
|
||||
fi
|
||||
exit 1
|
||||
|
||||
@@ -6,6 +6,7 @@ simple-staging.sh
|
||||
simple-release-extents.sh
|
||||
setattr_more.sh
|
||||
offline-extent-waiting.sh
|
||||
move-blocks.sh
|
||||
srch-basic-functionality.sh
|
||||
simple-xattr-unit.sh
|
||||
lock-refleak.sh
|
||||
@@ -16,6 +17,8 @@ createmany-parallel.sh
|
||||
createmany-large-names.sh
|
||||
createmany-rename-large-dir.sh
|
||||
stage-release-race-alloc.sh
|
||||
stage-multi-part.sh
|
||||
stage-tmpfile.sh
|
||||
basic-posix-consistency.sh
|
||||
dirent-consistency.sh
|
||||
lock-ex-race-processes.sh
|
||||
@@ -26,5 +29,5 @@ setup-error-teardown.sh
|
||||
mount-unmount-race.sh
|
||||
createmany-parallel-mounts.sh
|
||||
archive-light-cycle.sh
|
||||
stale-btree-read.sh
|
||||
block-stale-reads.sh
|
||||
xfstests.sh
|
||||
|
||||
145
tests/src/stage_tmpfile.c
Normal file
145
tests/src/stage_tmpfile.c
Normal file
@@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Exercise O_TMPFILE creation as well as staging from tmpfiles into
|
||||
* a released destination file.
|
||||
*
|
||||
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "ioctl.h"
|
||||
|
||||
#define array_size(arr) (sizeof(arr) / sizeof(arr[0]))
|
||||
|
||||
/*
|
||||
* Write known data into 8 tmpfiles.
|
||||
* Make a new file X and release it
|
||||
* Move contents of 8 tmpfiles into X.
|
||||
*/
|
||||
|
||||
struct sub_tmp_info {
|
||||
int fd;
|
||||
unsigned int offset;
|
||||
unsigned int length;
|
||||
};
|
||||
|
||||
#define SZ 4096
|
||||
char buf[SZ];
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct scoutfs_ioctl_release ioctl_args = {0};
|
||||
struct scoutfs_ioctl_move_blocks mb;
|
||||
struct sub_tmp_info sub_tmps[8];
|
||||
int tot_size = 0;
|
||||
char *dest_file;
|
||||
int dest_fd;
|
||||
char *mnt;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("%s <mountpoint> <dest_file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
mnt = argv[1];
|
||||
dest_file = argv[2];
|
||||
|
||||
for (i = 0; i < array_size(sub_tmps); i++) {
|
||||
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
|
||||
int remaining;
|
||||
|
||||
sub_tmp->fd = open(mnt, O_RDWR | O_TMPFILE, S_IRUSR | S_IWUSR);
|
||||
if (sub_tmp->fd < 0) {
|
||||
perror("error");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
sub_tmp->offset = tot_size;
|
||||
|
||||
/* First tmp file is 4MB */
|
||||
/* Each is 4k bigger than last */
|
||||
sub_tmp->length = (i + 1024) * sizeof(buf);
|
||||
|
||||
remaining = sub_tmp->length;
|
||||
|
||||
/* Each sub tmpfile written with 'A', 'B', etc. */
|
||||
memset(buf, 'A' + i, sizeof(buf));
|
||||
while (remaining) {
|
||||
int written;
|
||||
|
||||
written = write(sub_tmp->fd, buf, sizeof(buf));
|
||||
assert(written == sizeof(buf));
|
||||
tot_size += sizeof(buf);
|
||||
remaining -= written;
|
||||
}
|
||||
}
|
||||
|
||||
printf("total file size %d\n", tot_size);
|
||||
|
||||
dest_fd = open(dest_file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
|
||||
if (dest_fd == -1) {
|
||||
perror("error");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// make dest file big
|
||||
ret = posix_fallocate(dest_fd, 0, tot_size);
|
||||
if (ret) {
|
||||
perror("error");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// release everything in dest file
|
||||
ioctl_args.offset = 0;
|
||||
ioctl_args.length = tot_size;
|
||||
ioctl_args.data_version = 0;
|
||||
|
||||
ret = ioctl(dest_fd, SCOUTFS_IOC_RELEASE, &ioctl_args);
|
||||
if (ret < 0) {
|
||||
perror("error");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// move contents into dest in reverse order
|
||||
for (i = array_size(sub_tmps) - 1; i >= 0 ; i--) {
|
||||
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
|
||||
|
||||
mb.from_fd = sub_tmp->fd;
|
||||
mb.from_off = 0;
|
||||
mb.len = sub_tmp->length;
|
||||
mb.to_off = sub_tmp->offset;
|
||||
mb.data_version = 0;
|
||||
mb.flags = SCOUTFS_IOC_MB_STAGE;
|
||||
|
||||
ret = ioctl(dest_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
|
||||
if (ret < 0) {
|
||||
perror("error");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -161,9 +161,9 @@ for n in $(t_fs_nrs); do
|
||||
echo "bash $gen $blocks $n $p $f > $path" >> $create
|
||||
echo "cmp $path <(bash $gen $blocks $n $p $f)" >> $verify
|
||||
echo "vers=\$(scoutfs stat -s data_version $path)" >> $release
|
||||
echo "scoutfs release $path \$vers 0 $blocks" >> $release
|
||||
echo "scoutfs release $path -V \$vers -o 0 -l $bytes" >> $release
|
||||
echo "vers=\$(scoutfs stat -s data_version $path)" >> $stage
|
||||
echo "scoutfs stage $path \$vers 0 $bytes <(bash $gen $blocks $n $p $f)" >> $stage
|
||||
echo "scoutfs stage <(bash $gen $blocks $n $p $f) $path -V \$vers -o 0 -l $bytes " >> $stage
|
||||
echo "rm -f $path" >> $unlink
|
||||
|
||||
echo "x=\$(scoutfs stat -s online_blocks $path)" >> $online
|
||||
|
||||
@@ -9,14 +9,14 @@ t_require_commands scoutfs dd truncate touch mkdir rm rmdir
|
||||
release_vers() {
|
||||
local file="$1"
|
||||
local vers="$2"
|
||||
local block="$3"
|
||||
local count="$4"
|
||||
local offset="$3"
|
||||
local length="$4"
|
||||
|
||||
if [ "$vers" == "stat" ]; then
|
||||
vers=$(scoutfs stat -s data_version "$file")
|
||||
fi
|
||||
|
||||
scoutfs release "$file" "$vers" "$block" "$count"
|
||||
scoutfs release "$file" -V "$vers" -o "$offset" -l "$length"
|
||||
}
|
||||
|
||||
# if vers is "stat" then we ask stat_more for the data_version
|
||||
@@ -24,14 +24,14 @@ stage_vers() {
|
||||
local file="$1"
|
||||
local vers="$2"
|
||||
local offset="$3"
|
||||
local count="$4"
|
||||
local length="$4"
|
||||
local contents="$5"
|
||||
|
||||
if [ "$vers" == "stat" ]; then
|
||||
vers=$(scoutfs stat -s data_version "$file")
|
||||
fi
|
||||
|
||||
scoutfs stage "$file" "$vers" "$offset" "$count" "$contents"
|
||||
scoutfs stage "$contents" "$file" -V "$vers" -o "$offset" -l "$length"
|
||||
}
|
||||
|
||||
echo_blocks()
|
||||
@@ -57,15 +57,15 @@ dd if=/dev/zero of="$FILE" bs=4K count=1 conv=notrunc oflag=append status=none
|
||||
echo_blocks "$FILE"
|
||||
|
||||
echo "== release"
|
||||
release_vers "$FILE" stat 0 2
|
||||
release_vers "$FILE" stat 0 8K
|
||||
echo_blocks "$FILE"
|
||||
|
||||
echo "== duplicate release"
|
||||
release_vers "$FILE" stat 0 2
|
||||
release_vers "$FILE" stat 0 8K
|
||||
echo_blocks "$FILE"
|
||||
|
||||
echo "== duplicate release past i_size"
|
||||
release_vers "$FILE" stat 0 16
|
||||
release_vers "$FILE" stat 0 64K
|
||||
echo_blocks "$FILE"
|
||||
|
||||
echo "== stage"
|
||||
|
||||
@@ -160,8 +160,8 @@ for i in $(seq 1 1); do
|
||||
mkdir -p $(dirname $lnk)
|
||||
ln "$T_D0/file" $lnk
|
||||
|
||||
scoutfs ino-path $ino "$T_M0" > "$T_TMP.0"
|
||||
scoutfs ino-path $ino "$T_M1" > "$T_TMP.1"
|
||||
scoutfs ino-path -p "$T_M0" $ino > "$T_TMP.0"
|
||||
scoutfs ino-path -p "$T_M1" $ino > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
done
|
||||
done
|
||||
@@ -169,32 +169,32 @@ rm -rf "$T_D0/dir"
|
||||
|
||||
echo "== inode indexes match after syncing existing"
|
||||
t_sync_seq_index
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- meta_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- meta_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- data_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- data_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
|
||||
echo "== inode indexes match after copying and syncing"
|
||||
mkdir "$T_D0/dir"
|
||||
cp -ar /boot/conf* "$T_D0/dir"
|
||||
t_sync_seq_index
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- meta_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- meta_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- data_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- data_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
|
||||
echo "== inode indexes match after removing and syncing"
|
||||
rm -f "$T_D1/dir/conf*"
|
||||
t_sync_seq_index
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes meta_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- meta_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- meta_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M0" > "$T_TMP.0"
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M1" > "$T_TMP.1"
|
||||
scoutfs walk-inodes -p "$T_M0" -- data_seq 0 -1 > "$T_TMP.0"
|
||||
scoutfs walk-inodes -p "$T_M1" -- data_seq 0 -1 > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
|
||||
t_pass
|
||||
|
||||
61
tests/tests/block-stale-reads.sh
Normal file
61
tests/tests/block-stale-reads.sh
Normal file
@@ -0,0 +1,61 @@
|
||||
#
|
||||
# Exercise stale block reading.
|
||||
#
|
||||
# It would be very difficult to manipulate the allocators, cache, and
|
||||
# persistent blocks to create stable block reading scenarios. Instead
|
||||
# we use triggers to exercise how readers encounter stale blocks.
|
||||
#
|
||||
|
||||
t_require_commands touch setfattr getfattr
|
||||
|
||||
inc_wrap_fs_nr()
|
||||
{
|
||||
local nr="$(($1 + 1))"
|
||||
|
||||
if [ "$nr" == "$T_NR_MOUNTS" ]; then
|
||||
nr=0
|
||||
fi
|
||||
|
||||
echo $nr
|
||||
}
|
||||
|
||||
GETFATTR="getfattr --absolute-names"
|
||||
SETFATTR="setfattr"
|
||||
|
||||
echo "== create shared test file"
|
||||
touch "$T_D0/file"
|
||||
$SETFATTR -n user.xat -v 0 "$T_D0/file"
|
||||
|
||||
#
|
||||
# Trigger retries in the block cache as we bounce xattr values around
|
||||
# between sequential pairs of mounts. This is a little silly because if
|
||||
# either of the mounts are the server then they'll almost certaily have
|
||||
# their trigger fired prematurely by message handling btree calls while
|
||||
# working with the t_ helpers long before we work with the xattrs. But
|
||||
# the block cache stale retry path is still being exercised.
|
||||
#
|
||||
echo "== set and get xattrs between mount pairs while retrying"
|
||||
set_nr=0
|
||||
get_nr=$(inc_wrap_fs_nr $set_nr)
|
||||
|
||||
for i in $(seq 1 10); do
|
||||
eval set_file="\$T_D${set_nr}/file"
|
||||
eval get_file="\$T_D${get_nr}/file"
|
||||
|
||||
old_set=$(t_counter block_cache_remove_stale $set_nr)
|
||||
old_get=$(t_counter block_cache_remove_stale $get_nr)
|
||||
|
||||
t_trigger_arm_silent block_remove_stale $set_nr
|
||||
t_trigger_arm_silent block_remove_stale $get_nr
|
||||
|
||||
$SETFATTR -n user.xat -v $i "$set_file"
|
||||
$GETFATTR -n user.xat "$get_file" 2>&1 | t_filter_fs
|
||||
|
||||
t_counter_diff_changed block_cache_remove_stale $old_set $set_nr
|
||||
t_counter_diff_changed block_cache_remove_stale $old_get $get_nr
|
||||
|
||||
set_nr="$get_nr"
|
||||
get_nr=$(inc_wrap_fs_nr $set_nr)
|
||||
done
|
||||
|
||||
t_pass
|
||||
@@ -30,7 +30,7 @@ echo "== create files and sync"
|
||||
dd if=/dev/zero of="$DIR/truncate" bs=4096 count=1 status=none
|
||||
dd if=/dev/zero of="$DIR/stage" bs=4096 count=1 status=none
|
||||
vers=$(scoutfs stat -s data_version "$DIR/stage")
|
||||
scoutfs release "$DIR/stage" $vers 0 1
|
||||
scoutfs release "$DIR/stage" -V $vers -o 0 -l 4K
|
||||
dd if=/dev/zero of="$DIR/release" bs=4096 count=1 status=none
|
||||
touch "$DIR/write_end"
|
||||
mkdir "$DIR"/{mknod_dir,link_dir,unlink_dir,symlink_dir,rename_dir}
|
||||
@@ -41,9 +41,9 @@ sync; sync
|
||||
echo "== modify files"
|
||||
truncate -s 0 "$DIR/truncate"
|
||||
vers=$(scoutfs stat -s data_version "$DIR/stage")
|
||||
scoutfs stage "$DIR/stage" $vers 0 4096 /dev/zero
|
||||
scoutfs stage /dev/zero "$DIR/stage" -V $vers -o 0 -l 4096
|
||||
vers=$(scoutfs stat -s data_version "$DIR/release")
|
||||
scoutfs release "$DIR/release" $vers 0 1
|
||||
scoutfs release "$DIR/release" -V $vers -o 0 -l 4K
|
||||
dd if=/dev/zero of="$DIR/write_end" bs=4096 count=1 status=none conv=notrunc
|
||||
touch $DIR/mknod_dir/mknod_file
|
||||
touch $DIR/link_dir/link_targ
|
||||
|
||||
@@ -50,7 +50,7 @@ for m in 0 1; do
|
||||
done
|
||||
wait
|
||||
CONF="$((SECONDS - START))"
|
||||
echo "conf: $IND" >> $T_TMP.log
|
||||
echo "conf: $CONF" >> $T_TMP.log
|
||||
|
||||
if [ "$CONF" -gt "$((IND * 5))" ]; then
|
||||
t_fail "conflicting $CONF secs is more than 5x independent $IND secs"
|
||||
|
||||
@@ -9,7 +9,7 @@ FILE="$T_D0/file"
|
||||
echo "== race writing and index walking"
|
||||
for i in $(seq 1 10); do
|
||||
dd if=/dev/zero of="$FILE" bs=4K count=1 status=none conv=notrunc &
|
||||
scoutfs walk-inodes data_seq 0 -1 "$T_M0" > /dev/null &
|
||||
scoutfs walk-inodes -p "$T_M0" -- data_seq 0 -1 > /dev/null &
|
||||
wait
|
||||
done
|
||||
|
||||
|
||||
@@ -4,25 +4,23 @@
|
||||
# At the start of the test all mounts are mounted. Each iteration
|
||||
# randomly decides to change each mount or to leave it alone.
|
||||
#
|
||||
# They create dirty items before unmounting to encourage compaction
|
||||
# while unmounting
|
||||
# Each iteration create dirty items across the mounts randomly, giving
|
||||
# unmount some work to do.
|
||||
#
|
||||
# For this test to be meaningful it needs multiple mounts beyond the
|
||||
# quorum set which can be racing to mount and unmount. A reasonable
|
||||
# config would be 5 mounts with 3 quorum. But the test will run with
|
||||
# whatever count it finds.
|
||||
# quorum majority which can be racing to mount and unmount. A
|
||||
# reasonable config would be 5 mounts with 3 quorum members. But the
|
||||
# test will run with whatever count it finds.
|
||||
#
|
||||
# This assumes that all the mounts are configured as voting servers. We
|
||||
# could update it to be more clever and know that it can always safely
|
||||
# unmount mounts that aren't configured as servers.
|
||||
# The test assumes that the first mounts are the quorum members.
|
||||
#
|
||||
|
||||
# nothing to do if we can't unmount
|
||||
test "$T_NR_MOUNTS" == "$T_QUORUM" && \
|
||||
t_skip "only quorum members mounted, can't unmount"
|
||||
majority_nr=$(t_majority_count)
|
||||
quorum_nr=$T_QUORUM
|
||||
|
||||
nr_mounted=$T_NR_MOUNTS
|
||||
nr_quorum=$T_QUORUM
|
||||
cur_quorum=$quorum_nr
|
||||
test "$cur_quorum" == "$majority_nr" && \
|
||||
t_skip "all quorum members make up majority, need more mounts to unmount"
|
||||
|
||||
echo "== create per mount files"
|
||||
for i in $(t_fs_nrs); do
|
||||
@@ -55,25 +53,42 @@ while [ "$SECONDS" -lt "$END" ]; do
|
||||
fi
|
||||
|
||||
if [ "${mounted[$i]}" == 1 ]; then
|
||||
if [ "$nr_mounted" -gt "$nr_quorum" ]; then
|
||||
#
|
||||
# can always unmount non-quorum mounts,
|
||||
# can only unmount quorum members beyond majority
|
||||
#
|
||||
if [ "$i" -ge "$quorum_nr" -o \
|
||||
"$cur_quorum" -gt "$majority_nr" ]; then
|
||||
t_umount $i &
|
||||
pid=$!
|
||||
echo "umount $i pid $pid quo $cur_quorum" \
|
||||
>> $T_TMP.log
|
||||
pids="$pids $pid"
|
||||
mounted[$i]=0
|
||||
(( nr_mounted-- ))
|
||||
if [ "$i" -lt "$quorum_nr" ]; then
|
||||
(( cur_quorum-- ))
|
||||
fi
|
||||
fi
|
||||
else
|
||||
t_mount $i &
|
||||
pid=$!
|
||||
pids="$pids $pid"
|
||||
echo "mount $i pid $pid quo $cur_quorum" >> $T_TMP.log
|
||||
mounted[$i]=1
|
||||
(( nr_mounted++ ))
|
||||
if [ "$i" -lt "$quorum_nr" ]; then
|
||||
(( cur_quorum++ ))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "waiting (secs $SECONDS)" >> $T_TMP.log
|
||||
for p in $pids; do
|
||||
t_quiet wait $p
|
||||
wait $p
|
||||
rc=$?
|
||||
if [ "$rc" != 0 ]; then
|
||||
echo "waiting for pid $p returned $rc"
|
||||
t_fail "background mount/umount returned error"
|
||||
fi
|
||||
done
|
||||
echo "done waiting (secs $SECONDS))" >> $T_TMP.log
|
||||
done
|
||||
|
||||
169
tests/tests/move-blocks.sh
Normal file
169
tests/tests/move-blocks.sh
Normal file
@@ -0,0 +1,169 @@
|
||||
#
|
||||
# test MOVE_BLOCKS ioctl, mostly basic error testing and functionality,
|
||||
# but a bit of expected use.
|
||||
#
|
||||
|
||||
t_require_commands scoutfs dd
|
||||
|
||||
FROM="$T_D0/from"
|
||||
TO="$T_D0/to"
|
||||
HARD="$T_D0/hardlink"
|
||||
OTHER="$T_TMP.other"
|
||||
|
||||
BLOCKS=8
|
||||
BS=4096
|
||||
PART=123
|
||||
LEN=$(((BS * BLOCKS) + PART))
|
||||
PIECES=8
|
||||
|
||||
regenerate_files() {
|
||||
rm -f "$FROM"
|
||||
rm -f "$TO"
|
||||
dd if=/dev/urandom of="$FROM" bs=$LEN count=1 status=none
|
||||
touch "$TO"
|
||||
}
|
||||
|
||||
set_updated_fields() {
|
||||
local arr="$1"
|
||||
local path="$2"
|
||||
|
||||
eval $arr["ctime"]="$(stat -c '%Z' "$path")"
|
||||
eval $arr["mtime"]="$(stat -c '%Y' "$path")"
|
||||
eval $arr["data_version"]="$(scoutfs stat -s data_version "$path")"
|
||||
eval $arr["meta_seq"]="$(scoutfs stat -s meta_seq "$path")"
|
||||
eval $arr["data_seq"]="$(scoutfs stat -s data_seq "$path")"
|
||||
}
|
||||
|
||||
#
|
||||
# before moving extents manually copy the byte regions so that we have
|
||||
# expected good file contents to compare to. We know that the byte
|
||||
# regions are 4KB block aligned (with an allowance for a len that ends
|
||||
# on from i_size).
|
||||
#
|
||||
move_and_compare() {
|
||||
local from="$1"
|
||||
local from_off="$2"
|
||||
local from_blk="$((from_off / BS))"
|
||||
local len="$3"
|
||||
local blocks="$(((len + BS - 1) / BS))"
|
||||
local to="$4"
|
||||
local to_off="$5"
|
||||
local to_blk="$((to_off / BS))"
|
||||
|
||||
local right_start=$((from_blk + blocks))
|
||||
local from_size=$(stat -c "%s" "$from")
|
||||
local from_blocks=$(( (from_size + BS - 1) / BS ))
|
||||
local right_len=$((from_blocks - right_start))
|
||||
|
||||
# copying around instead of punching hole
|
||||
dd if="$from" of="$from.expected" bs="$BS" \
|
||||
skip=0 seek=0 count="$from_blk" \
|
||||
status=none
|
||||
dd if="$from" of="$from.expected" bs="$BS" \
|
||||
skip="$right_start" seek="$right_start" count="$right_len" \
|
||||
status=none conv=notrunc
|
||||
# moving doesn't truncate, expect full size when no data
|
||||
truncate -s "$from_size" "$from.expected"
|
||||
|
||||
cp "$to" "$to.expected"
|
||||
dd if="$from" of="$to.expected" bs="$BS" \
|
||||
skip="$from_blk" seek="$to_blk" count="$blocks" \
|
||||
status=none conv=notrunc
|
||||
|
||||
scoutfs move-blocks "$from" -f "$from_off" -l "$len" "$to" -t "$to_off" \
|
||||
2>&1 | t_filter_fs
|
||||
|
||||
cmp "$from" "$from.expected"
|
||||
cmp "$to" "$to.expected"
|
||||
}
|
||||
|
||||
echo "== build test files"
|
||||
regenerate_files
|
||||
touch "$OTHER"
|
||||
ln "$FROM" "$HARD"
|
||||
|
||||
echo "== wrapped offsets should fail"
|
||||
HUGE=0x8000000000000000
|
||||
scoutfs move-blocks "$FROM" -f "$HUGE" -l "$HUGE" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$HUGE" "$TO" -t "$HUGE" 2>&1 | t_filter_fs
|
||||
|
||||
echo "== specifying same file fails"
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$BS" "$HARD" -t 0 2>&1 | t_filter_fs
|
||||
|
||||
echo "== specifying files in other file systems fails"
|
||||
scoutfs move-blocks "$OTHER" -f 0 -l "$BS" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
|
||||
echo "== offsets must be multiples of 4KB"
|
||||
scoutfs move-blocks "$FROM" -f 1 -l "$BS" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
scoutfs move-blocks "$FROM" -f 0 -l 1 "$TO" -t 0 2>&1 | t_filter_fs
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$BS" "$TO" -t 1 2>&1 | t_filter_fs
|
||||
|
||||
echo "== can't move onto existing extent"
|
||||
dd if=/dev/urandom of="$TO" bs=$BS count=1 status=none
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$BS" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
|
||||
echo "== can't move between files with offline extents"
|
||||
dd if=/dev/zero of="$TO" bs=$BS count=1 status=none
|
||||
vers=$(scoutfs stat -s data_version "$TO")
|
||||
scoutfs release "$TO" -V "$vers" -o 0 -l $BS
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$BS" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
regenerate_files
|
||||
vers=$(scoutfs stat -s data_version "$FROM")
|
||||
scoutfs release "$FROM" -V "$vers" -o 0 -l $BS
|
||||
scoutfs move-blocks "$FROM" -f 0 -l "$BS" "$TO" -t 0 2>&1 | t_filter_fs
|
||||
regenerate_files
|
||||
|
||||
echo "== basic moves work"
|
||||
move_and_compare "$FROM" 0 "$BS" "$TO" 0
|
||||
regenerate_files
|
||||
move_and_compare "$FROM" 0 "$BS" "$TO" "$BS"
|
||||
regenerate_files
|
||||
move_and_compare "$FROM" 0 "$LEN" "$TO" 0
|
||||
regenerate_files
|
||||
|
||||
echo "== moving final partial block sets partial i_size"
|
||||
move_and_compare "$FROM" $((LEN - PART)) "$PART" "$TO" 0
|
||||
stat -c '%s' "$TO"
|
||||
regenerate_files
|
||||
|
||||
echo "== moving updates inode fields"
|
||||
declare -A from_before from_after to_before to_after
|
||||
set_updated_fields from_before "$FROM"
|
||||
set_updated_fields to_before "$TO"
|
||||
t_quiet sync
|
||||
sleep 1
|
||||
move_and_compare "$FROM" 0 "$BS" "$TO" 0
|
||||
set_updated_fields from_after "$FROM"
|
||||
set_updated_fields to_after "$TO"
|
||||
for k in ${!from_after[@]}; do
|
||||
if [ "${from_before[$k]}" == "${from_after[$k]}" ]; then
|
||||
echo "move didn't change from $k ${from_before[$k]}"
|
||||
fi
|
||||
if [ "${to_before[$k]}" == "${to_after[$k]}" ]; then
|
||||
echo "move didn't change to $k ${to_before[$k]}"
|
||||
fi
|
||||
done
|
||||
regenerate_files
|
||||
|
||||
echo "== moving blocks backwards works"
|
||||
cp "$FROM" "$FROM.orig"
|
||||
move_and_compare "$FROM" $((LEN - PART)) "$PART" "$TO" $((LEN - PART))
|
||||
for i in $(seq $((BLOCKS - 1)) -1 0); do
|
||||
move_and_compare "$FROM" $((i * BS)) "$BS" "$TO" $((i * BS))
|
||||
done
|
||||
cmp "$TO" "$FROM.orig"
|
||||
regenerate_files
|
||||
|
||||
echo "== combine many files into one"
|
||||
for i in $(seq 0 $((PIECES - 1))); do
|
||||
dd if=/dev/urandom of="$FROM.$i" bs=$BS count=$BLOCKS status=none
|
||||
cat "$FROM.$i" >> "$TO.large"
|
||||
move_and_compare "$FROM.$i" 0 "$((BS * BLOCKS))" \
|
||||
"$TO" $((i * BS * BLOCKS))
|
||||
done
|
||||
((i++))
|
||||
cat "$FROM" >> "$TO.large"
|
||||
move_and_compare "$FROM" 0 "$LEN" "$TO" $((i * BS * BLOCKS))
|
||||
cmp "$TO.large" "$TO"
|
||||
|
||||
t_pass
|
||||
@@ -24,7 +24,7 @@ expect_wait()
|
||||
shift
|
||||
done
|
||||
|
||||
scoutfs data-waiting 0 0 "$file" > $T_TMP.wait.output
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$file" > $T_TMP.wait.output
|
||||
diff -u $T_TMP.wait.expected $T_TMP.wait.output
|
||||
}
|
||||
|
||||
@@ -37,9 +37,9 @@ ino=$(stat -c "%i" "$DIR/file")
|
||||
vers=$(scoutfs stat -s data_version "$DIR/file")
|
||||
|
||||
echo "== waiter shows up in ioctl"
|
||||
echo "offline wating should be empty:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
echo "offline waiting should be empty:"
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cat "$DIR/file" > /dev/null &
|
||||
sleep .1
|
||||
echo "offline waiting should now have one known entry:"
|
||||
@@ -58,13 +58,13 @@ echo "offline waiting now has two known entries:"
|
||||
expect_wait "$DIR/file" "read" $ino 0 $ino 1
|
||||
|
||||
echo "== staging wakes everyone"
|
||||
scoutfs stage "$DIR/file" "$vers" 0 $BYTES "$DIR/golden"
|
||||
scoutfs stage "$DIR/golden" "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
sleep .1
|
||||
echo "offline wating should be empty again:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
echo "offline waiting should be empty again:"
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
|
||||
echo "== interruption does no harm"
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cat "$DIR/file" > /dev/null 2>&1 &
|
||||
pid="$!"
|
||||
sleep .1
|
||||
@@ -74,7 +74,7 @@ kill "$pid"
|
||||
# silence terminated message
|
||||
wait "$pid" 2> /dev/null
|
||||
echo "offline waiting should be empty again:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
|
||||
echo "== EIO injection for waiting readers works"
|
||||
ino=$(stat -c "%i" "$DIR/file")
|
||||
@@ -86,23 +86,23 @@ dd if="$DIR/file" bs=$BS skip=1 of=/dev/null 2>&1 | \
|
||||
pid2="$!"
|
||||
sleep .1
|
||||
echo "offline waiting should now have two known entries:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
expect_wait "$DIR/file" "read" $ino 0 $ino 1
|
||||
scoutfs data-wait-err "$DIR" "$ino" "$vers" 0 $((BS*2)) read -5
|
||||
scoutfs data-wait-err -p "$DIR" -I "$ino" -V "$vers" -F 0 -C $((BS*2)) -O read -E -5
|
||||
sleep .1
|
||||
echo "offline waiting should now have 0 known entries:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
# silence terminated message
|
||||
wait "$pid" 2> /dev/null
|
||||
wait "$pid2" 2> /dev/null
|
||||
cat $T_TMP.cat1
|
||||
cat $T_TMP.cat2
|
||||
echo "offline waiting should be empty again:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
|
||||
echo "== readahead while offline does no harm"
|
||||
xfs_io -c "fadvise -w 0 $BYTES" "$DIR/file"
|
||||
scoutfs stage "$DIR/file" "$vers" 0 $BYTES "$DIR/golden"
|
||||
scoutfs stage "$DIR/golden" "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cmp "$DIR/file" "$DIR/golden"
|
||||
|
||||
echo "== waiting on interesting blocks works"
|
||||
@@ -113,65 +113,65 @@ for base in $(echo 0 $(($BLOCKS / 2)) $(($BLOCKS - 2))); do
|
||||
done
|
||||
done
|
||||
for b in $blocks; do
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
dd if="$DIR/file" of=/dev/null \
|
||||
status=none bs=$BS count=1 skip=$b 2> /dev/null &
|
||||
sleep .1
|
||||
scoutfs stage "$DIR/file" "$vers" 0 $BYTES "$DIR/golden"
|
||||
scoutfs stage "$DIR/golden" "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
sleep .1
|
||||
echo "offline waiting is empty at block $b"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
done
|
||||
|
||||
echo "== contents match when staging blocks forward"
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cat "$DIR/file" > "$DIR/forward" &
|
||||
for b in $(seq 0 1 $((BLOCKS - 1))); do
|
||||
dd if="$DIR/golden" of="$DIR/block" status=none bs=$BS skip=$b count=1
|
||||
scoutfs stage "$DIR/file" "$vers" $((b * $BS)) $BS "$DIR/block"
|
||||
scoutfs stage "$DIR/block" "$DIR/file" -V "$vers" -o $((b * $BS)) -l $BS
|
||||
done
|
||||
sleep .1
|
||||
cmp "$DIR/golden" "$DIR/forward"
|
||||
|
||||
echo "== contents match when staging blocks backwards"
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cat "$DIR/file" > "$DIR/backward" &
|
||||
for b in $(seq $((BLOCKS - 1)) -1 0); do
|
||||
dd if="$DIR/golden" of="$DIR/block" status=none bs=$BS skip=$b count=1
|
||||
scoutfs stage "$DIR/file" "$vers" $((b * $BS)) $BS "$DIR/block"
|
||||
scoutfs stage "$DIR/block" "$DIR/file" -V "$vers" -o $((b * $BS)) -l $BS
|
||||
done
|
||||
sleep .1
|
||||
cmp "$DIR/golden" "$DIR/backward"
|
||||
|
||||
echo "== truncate to same size doesn't wait"
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
truncate -s "$BYTES" "$DIR/file" &
|
||||
sleep .1
|
||||
echo "offline wating should be empty:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
|
||||
echo "== truncating does wait"
|
||||
truncate -s "$BS" "$DIR/file" &
|
||||
sleep .1
|
||||
echo "truncate should be waiting for first block:"
|
||||
expect_wait "$DIR/file" "change_size" $ino 0
|
||||
scoutfs stage "$DIR/file" "$vers" 0 $BYTES "$DIR/golden"
|
||||
scoutfs stage "$DIR/golden" "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
sleep .1
|
||||
echo "trunate should no longer be waiting:"
|
||||
scoutfs data-waiting 0 0 "$DIR" | wc -l
|
||||
scoutfs data-waiting -B 0 -I 0 -p "$DIR" | wc -l
|
||||
cat "$DIR/golden" > "$DIR/file"
|
||||
vers=$(scoutfs stat -s data_version "$DIR/file")
|
||||
|
||||
echo "== writing waits"
|
||||
dd if=/dev/urandom of="$DIR/other" bs=$BS count=$BLOCKS status=none
|
||||
scoutfs release "$DIR/file" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
# overwrite, not truncate+write
|
||||
dd if="$DIR/other" of="$DIR/file" \
|
||||
bs=$BS count=$BLOCKS conv=notrunc status=none &
|
||||
sleep .1
|
||||
echo "should be waiting for write"
|
||||
expect_wait "$DIR/file" "write" $ino 0
|
||||
scoutfs stage "$DIR/file" "$vers" 0 $BYTES "$DIR/golden"
|
||||
scoutfs stage "$DIR/golden" "$DIR/file" -V "$vers" -o 0 -l $BYTES
|
||||
cmp "$DIR/file" "$DIR/other"
|
||||
|
||||
echo "== cleanup"
|
||||
|
||||
@@ -8,63 +8,63 @@ FILE="$T_D0/file"
|
||||
|
||||
echo "== 0 data_version arg fails"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 0 -s 1 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 0 -s 1 "$FILE" 2>&1 | t_filter_fs
|
||||
rm "$FILE"
|
||||
|
||||
echo "== args must specify size and offline"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 1 -o -s 0 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -o -s 0 "$FILE" 2>&1 | t_filter_fs
|
||||
rm "$FILE"
|
||||
|
||||
echo "== only works on regular files"
|
||||
mkdir "$T_D0/dir"
|
||||
scoutfs setattr -d 1 -s 1 -f "$T_D0/dir" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -s 1 "$T_D0/dir" 2>&1 | t_filter_fs
|
||||
rmdir "$T_D0/dir"
|
||||
mknod "$T_D0/char" c 1 3
|
||||
scoutfs setattr -d 1 -s 1 -f "$T_D0/char" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -s 1 "$T_D0/char" 2>&1 | t_filter_fs
|
||||
rm "$T_D0/char"
|
||||
|
||||
echo "== non-zero file size fails"
|
||||
echo contents > "$FILE"
|
||||
scoutfs setattr -d 1 -s 1 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -s 1 "$FILE" 2>&1 | t_filter_fs
|
||||
rm "$FILE"
|
||||
|
||||
echo "== non-zero file data_version fails"
|
||||
touch "$FILE"
|
||||
truncate -s 1M "$FILE"
|
||||
truncate -s 0 "$FILE"
|
||||
scoutfs setattr -d 1 -o -s 1 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -o -s 1 "$FILE" 2>&1 | t_filter_fs
|
||||
rm "$FILE"
|
||||
|
||||
echo "== large size is set"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 1 -s 578437695752307201 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -s 578437695752307201 "$FILE" 2>&1 | t_filter_fs
|
||||
stat -c "%s" "$FILE"
|
||||
rm "$FILE"
|
||||
|
||||
echo "== large data_version is set"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 578437695752307201 -s 1 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 578437695752307201 -s 1 "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs stat -s data_version "$FILE"
|
||||
rm "$FILE"
|
||||
|
||||
echo "== large ctime is set"
|
||||
touch "$FILE"
|
||||
# only doing 32bit sec 'cause stat gets confused
|
||||
scoutfs setattr -c 67305985.999999999 -d 1 -s 1 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -t 67305985.999999999 -V 1 -s 1 "$FILE" 2>&1 | t_filter_fs
|
||||
TZ=GMT stat -c "%z" "$FILE"
|
||||
rm "$FILE"
|
||||
|
||||
echo "== large offline extents are created"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 1 -o -s $((10007 * 4096)) -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -o -s $((10007 * 4096)) "$FILE" 2>&1 | t_filter_fs
|
||||
filefrag -v -b4096 "$FILE" 2>&1 | t_filter_fs
|
||||
rm "$FILE"
|
||||
|
||||
# had a bug where we were creating extents that were too long
|
||||
echo "== correct offline extent length"
|
||||
touch "$FILE"
|
||||
scoutfs setattr -d 1 -o -s 4000000000 -f "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs setattr -V 1 -o -s 4000000000 "$FILE" 2>&1 | t_filter_fs
|
||||
scoutfs stat -s offline_blocks "$FILE"
|
||||
rm "$FILE"
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ query_index() {
|
||||
local first="${2:-0}"
|
||||
local last="${3:--1}"
|
||||
|
||||
scoutfs walk-inodes $which $first $last "$T_M0"
|
||||
scoutfs walk-inodes -p "$T_M0" -- $which $first $last
|
||||
}
|
||||
|
||||
# print the major in the index for the ino if it's found
|
||||
@@ -22,7 +22,7 @@ ino_major() {
|
||||
local which="$1"
|
||||
local ino="$2"
|
||||
|
||||
scoutfs walk-inodes $which 0 -1 "$T_M0" | \
|
||||
scoutfs walk-inodes -p "$T_M0" -- $which 0 -1 | \
|
||||
awk '($4 == "'$ino'") {print $2}'
|
||||
}
|
||||
|
||||
|
||||
@@ -23,14 +23,14 @@ create_file() {
|
||||
release_vers() {
|
||||
local file="$1"
|
||||
local vers="$2"
|
||||
local block="$3"
|
||||
local count="$4"
|
||||
local offset="$3"
|
||||
local length="$4"
|
||||
|
||||
if [ "$vers" == "stat" ]; then
|
||||
vers=$(scoutfs stat -s data_version "$file")
|
||||
fi
|
||||
|
||||
scoutfs release "$file" "$vers" "$block" "$count"
|
||||
scoutfs release "$file" -V "$vers" -o "$offset" -l "$length"
|
||||
}
|
||||
|
||||
FILE="$T_D0/file"
|
||||
@@ -38,41 +38,41 @@ CHAR="$FILE-char"
|
||||
|
||||
echo "== simple whole file multi-block releasing"
|
||||
create_file "$FILE" 65536
|
||||
release_vers "$FILE" stat 0 16
|
||||
release_vers "$FILE" stat 0 64K
|
||||
rm "$FILE"
|
||||
|
||||
echo "== release last block that straddles i_size"
|
||||
create_file "$FILE" 6144
|
||||
release_vers "$FILE" stat 1 1
|
||||
release_vers "$FILE" stat 4K 4K
|
||||
rm "$FILE"
|
||||
|
||||
echo "== release entire file past i_size"
|
||||
create_file "$FILE" 8192
|
||||
release_vers "$FILE" stat 0 100
|
||||
release_vers "$FILE" stat 0 400K
|
||||
# not deleting for the following little tests
|
||||
|
||||
echo "== releasing offline extents is fine"
|
||||
release_vers "$FILE" stat 0 100
|
||||
release_vers "$FILE" stat 0 400K
|
||||
|
||||
echo "== 0 count is fine"
|
||||
release_vers "$FILE" stat 0 0
|
||||
|
||||
echo "== release past i_size is fine"
|
||||
release_vers "$FILE" stat 100 1
|
||||
release_vers "$FILE" stat 400K 4K
|
||||
|
||||
echo "== wrapped blocks fails"
|
||||
release_vers "$FILE" stat $vers 0x8000000000000000 0x8000000000000000
|
||||
|
||||
echo "== releasing non-file fails"
|
||||
mknod "$CHAR" c 1 3
|
||||
release_vers "$CHAR" stat 0 1 2>&1 | t_filter_fs
|
||||
release_vers "$CHAR" stat 0 4K 2>&1 | t_filter_fs
|
||||
rm "$CHAR"
|
||||
|
||||
echo "== releasing a non-scoutfs file fails"
|
||||
release_vers "/dev/null" stat 0 1
|
||||
release_vers "/dev/null" stat 0 4K
|
||||
|
||||
echo "== releasing bad version fails"
|
||||
release_vers "$FILE" 0 0 1
|
||||
release_vers "$FILE" 0 0 4K
|
||||
|
||||
rm "$FILE"
|
||||
|
||||
@@ -108,9 +108,9 @@ for c in $(seq 0 4); do
|
||||
start=$(fiemap_file "$FILE" | \
|
||||
awk '($1 == "0:"){print substr($4, 0, length($4)- 2)}')
|
||||
|
||||
release_vers "$FILE" stat $a 1
|
||||
release_vers "$FILE" stat $b 1
|
||||
release_vers "$FILE" stat $c 1
|
||||
release_vers "$FILE" stat $(($a * 4))K 4K
|
||||
release_vers "$FILE" stat $(($b * 4))K 4K
|
||||
release_vers "$FILE" stat $(($c * 4))K 4K
|
||||
|
||||
echo -n "$a $b $c:"
|
||||
|
||||
|
||||
@@ -29,14 +29,14 @@ create_file() {
|
||||
release_vers() {
|
||||
local file="$1"
|
||||
local vers="$2"
|
||||
local block="$3"
|
||||
local count="$4"
|
||||
local offset="$3"
|
||||
local length="$4"
|
||||
|
||||
if [ "$vers" == "stat" ]; then
|
||||
vers=$(scoutfs stat -s data_version "$file")
|
||||
fi
|
||||
|
||||
scoutfs release "$file" "$vers" "$block" "$count"
|
||||
scoutfs release "$file" -V "$vers" -o "$offset" -l "$length"
|
||||
}
|
||||
|
||||
# if vers is "stat" then we ask stat_more for the data_version
|
||||
@@ -44,14 +44,14 @@ stage_vers() {
|
||||
local file="$1"
|
||||
local vers="$2"
|
||||
local offset="$3"
|
||||
local count="$4"
|
||||
local length="$4"
|
||||
local contents="$5"
|
||||
|
||||
if [ "$vers" == "stat" ]; then
|
||||
vers=$(scoutfs stat -s data_version "$file")
|
||||
fi
|
||||
|
||||
scoutfs stage "$file" "$vers" "$offset" "$count" "$contents"
|
||||
scoutfs stage "$contents" "$file" -V "$vers" -o "$offset" -l "$length"
|
||||
}
|
||||
|
||||
FILE="$T_D0/file"
|
||||
@@ -60,7 +60,7 @@ CHAR="$FILE-char"
|
||||
echo "== create/release/stage single block file"
|
||||
create_file "$FILE" 4096
|
||||
cp "$FILE" "$T_TMP"
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
# make sure there only offline extents
|
||||
fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
stage_vers "$FILE" stat 0 4096 "$T_TMP"
|
||||
@@ -70,7 +70,7 @@ rm -f "$FILE"
|
||||
echo "== create/release/stage larger file"
|
||||
create_file "$FILE" $((4096 * 4096))
|
||||
cp "$FILE" "$T_TMP"
|
||||
release_vers "$FILE" stat 0 4096
|
||||
release_vers "$FILE" stat 0 16M
|
||||
# make sure there only offline extents
|
||||
fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
stage_vers "$FILE" stat 0 $((4096 * 4096)) "$T_TMP"
|
||||
@@ -83,7 +83,7 @@ cp "$FILE" "$T_TMP"
|
||||
nr=1
|
||||
while [ "$nr" -lt 10 ]; do
|
||||
echo "attempt $nr" >> $seqres.full 2>&1
|
||||
release_vers "$FILE" stat 0 1024
|
||||
release_vers "$FILE" stat 0 4096K
|
||||
sync
|
||||
echo 3 > /proc/sys/vm/drop_caches
|
||||
stage_vers "$FILE" stat 0 $((4096 * 1024)) "$T_TMP"
|
||||
@@ -100,7 +100,7 @@ sync
|
||||
stat "$FILE" > "$T_TMP.before"
|
||||
scoutfs stat -s data_seq "$FILE" >> "$T_TMP.before"
|
||||
scoutfs stat -s data_version "$FILE" >> "$T_TMP.before"
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
stage_vers "$FILE" stat 0 4096 "$T_TMP"
|
||||
stat "$FILE" > "$T_TMP.after"
|
||||
scoutfs stat -s data_seq "$FILE" >> "$T_TMP.after"
|
||||
@@ -110,7 +110,7 @@ rm -f "$FILE"
|
||||
|
||||
echo "== stage does change meta_seq"
|
||||
create_file "$FILE" 4096
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
sync
|
||||
before=$(scoutfs stat -s meta_seq "$FILE")
|
||||
stage_vers "$FILE" stat 0 4096 "$T_TMP"
|
||||
@@ -121,7 +121,7 @@ rm -f "$FILE"
|
||||
# XXX this now waits, demand staging should be own test
|
||||
#echo "== can't write to offline"
|
||||
#create_file "$FILE" 4096
|
||||
#release_vers "$FILE" stat 0 1
|
||||
#release_vers "$FILE" stat 0 4K
|
||||
## make sure there only offline extents
|
||||
#fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
#dd if=/dev/zero of="$FILE" conv=notrunc bs=4096 count=1 2>&1 | t_filter_fs
|
||||
@@ -144,13 +144,13 @@ rm -f "$FILE"
|
||||
|
||||
echo "== wrapped region fails"
|
||||
create_file "$FILE" 4096
|
||||
stage_vers "$FILE" stat 0xFFFFFFFFFFFFFFFF 4096 /dev/zero
|
||||
stage_vers "$FILE" stat 0xFFFFFFFFFFFFF000 4096 /dev/zero
|
||||
rm -f "$FILE"
|
||||
|
||||
echo "== non-block aligned offset fails"
|
||||
create_file "$FILE" 4096
|
||||
cp "$FILE" "$T_TMP"
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
stage_vers "$FILE" stat 1 4095 "$T_TMP"
|
||||
fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
rm -f "$FILE"
|
||||
@@ -158,7 +158,7 @@ rm -f "$FILE"
|
||||
echo "== non-block aligned len within block fails"
|
||||
create_file "$FILE" 4096
|
||||
cp "$FILE" "$T_TMP"
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
stage_vers "$FILE" stat 0 1024 "$T_TMP"
|
||||
fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
rm -f "$FILE"
|
||||
@@ -166,14 +166,14 @@ rm -f "$FILE"
|
||||
echo "== partial final block that writes to i_size does work"
|
||||
create_file "$FILE" 2048
|
||||
cp "$FILE" "$T_TMP"
|
||||
release_vers "$FILE" stat 0 1
|
||||
release_vers "$FILE" stat 0 4K
|
||||
stage_vers "$FILE" stat 0 2048 "$T_TMP"
|
||||
cmp "$FILE" "$T_TMP"
|
||||
rm -f "$FILE"
|
||||
|
||||
echo "== zero length stage doesn't bring blocks online"
|
||||
create_file "$FILE" $((4096 * 100))
|
||||
release_vers "$FILE" stat 0 100
|
||||
release_vers "$FILE" stat 0 400K
|
||||
stage_vers "$FILE" stat 4096 0 /dev/zero
|
||||
fiemap_file "$FILE" | grep "^[ 0-9]*:" | grep -v "unknown"
|
||||
rm -f "$FILE"
|
||||
@@ -188,7 +188,7 @@ rm -f "$FILE"
|
||||
#create_file "$FILE" 4096
|
||||
#cp "$FILE" "$T_TMP"
|
||||
#sync
|
||||
#release_vers "$FILE" stat 0 1
|
||||
#release_vers "$FILE" stat 0 4K
|
||||
#md5sum "$FILE" 2>&1 | t_filter_fs
|
||||
#stage_vers "$FILE" stat 0 4096 "$T_TMP"
|
||||
#cmp "$FILE" "$T_TMP"
|
||||
|
||||
@@ -17,7 +17,7 @@ diff_srch_find()
|
||||
local n="$1"
|
||||
|
||||
sync
|
||||
scoutfs search-xattrs -n "$n" -f "$T_M0" > "$T_TMP.srch"
|
||||
scoutfs search-xattrs "$n" -p "$T_M0" > "$T_TMP.srch"
|
||||
find_xattrs -d "$T_D0" -m "$T_M0" -n "$n" > "$T_TMP.find"
|
||||
|
||||
diff -u "$T_TMP.srch" "$T_TMP.find"
|
||||
|
||||
69
tests/tests/stage-multi-part.sh
Normal file
69
tests/tests/stage-multi-part.sh
Normal file
@@ -0,0 +1,69 @@
|
||||
#
|
||||
# Stage a large file in multiple parts and have a reader read it while
|
||||
# it's being staged. This has found problems with extent access
|
||||
# locking.
|
||||
#
|
||||
|
||||
t_require_commands scoutfs perl cmp rm
|
||||
|
||||
FILE_BYTES=$((4 * 1024 * 1024 * 1024))
|
||||
FILE_BLOCKS=$((FILE_BYTES / 4096))
|
||||
FRAG_BYTES=$((128 * 1024 * 1024))
|
||||
FRAG_BLOCKS=$((FRAG_BYTES / 4096))
|
||||
NR_FRAGS=$((FILE_BLOCKS / FRAG_BLOCKS))
|
||||
|
||||
#
|
||||
# high bandwidth way to generate file contents with predictable
|
||||
# contents. We use ascii lines with the block identity, padded to 4KB
|
||||
# with spaces.
|
||||
#
|
||||
# $1 is number of 4k blocks to write, and each block gets its block
|
||||
# number in the line. $2, $3, and $4 are fields that are put in every
|
||||
# block.
|
||||
#
|
||||
gen() {
|
||||
perl -e 'for (my $i = 0; $i < '$1'; $i++) { printf("mount %020u process %020u file %020u blkno %020u%s\n", '$2', '$3', '$4', $i, " " x 3987); }'
|
||||
}
|
||||
|
||||
release_file() {
|
||||
local path="$1"
|
||||
local vers=$(scoutfs stat -s data_version "$path")
|
||||
|
||||
scoutfs release "$path" -V "$vers" -o 0 -l $FILE_BYTES
|
||||
}
|
||||
|
||||
stage_file() {
|
||||
local path="$1"
|
||||
local vers=$(scoutfs stat -s data_version "$path")
|
||||
local off=0
|
||||
|
||||
for a in $(seq 1 $NR_FRAGS); do
|
||||
scoutfs stage <(gen $FRAG_BLOCKS $a $a $a) "$path" -V "$vers" \
|
||||
-o $off -l $FRAG_BYTES
|
||||
((off+=$FRAG_BYTES))
|
||||
done
|
||||
}
|
||||
|
||||
FILE="$T_D0/file"
|
||||
|
||||
whole_file() {
|
||||
for a in $(seq 1 $NR_FRAGS); do
|
||||
gen $FRAG_BLOCKS $a $a $a
|
||||
done
|
||||
}
|
||||
|
||||
#
|
||||
# just one pass through the file.
|
||||
#
|
||||
|
||||
whole_file > "$FILE"
|
||||
release_file "$FILE"
|
||||
|
||||
cmp "$FILE" <(whole_file) &
|
||||
pid=$!
|
||||
|
||||
stage_file "$FILE"
|
||||
|
||||
wait $pid || t_fail "comparison failed"
|
||||
|
||||
t_pass
|
||||
@@ -15,7 +15,7 @@ release_file() {
|
||||
local vers=$(scoutfs stat -s data_version "$path")
|
||||
|
||||
echo "releasing $path" >> "$T_TMP.log"
|
||||
scoutfs release "$path" "$vers" 0 $BLOCKS
|
||||
scoutfs release "$path" -V "$vers" -o 0 -l $BYTES
|
||||
echo "released $path" >> "$T_TMP.log"
|
||||
}
|
||||
|
||||
@@ -24,8 +24,8 @@ stage_file() {
|
||||
local vers=$(scoutfs stat -s data_version "$path")
|
||||
|
||||
echo "staging $path" >> "$T_TMP.log"
|
||||
scoutfs stage "$path" "$vers" 0 $BYTES \
|
||||
"$DIR/good/$(basename $path)"
|
||||
scoutfs stage "$DIR/good/$(basename $path)" "$path" -V "$vers" -o 0 -l $BYTES
|
||||
|
||||
echo "staged $path" >> "$T_TMP.log"
|
||||
}
|
||||
|
||||
|
||||
15
tests/tests/stage-tmpfile.sh
Normal file
15
tests/tests/stage-tmpfile.sh
Normal file
@@ -0,0 +1,15 @@
|
||||
#
|
||||
# Run tmpfile_stage and check the output with hexdump.
|
||||
#
|
||||
|
||||
t_require_commands stage_tmpfile hexdump
|
||||
|
||||
DEST_FILE="$T_D0/dest_file"
|
||||
|
||||
stage_tmpfile $T_D0 $DEST_FILE
|
||||
|
||||
hexdump -C "$DEST_FILE"
|
||||
|
||||
rm -fr "$DEST_FILE"
|
||||
|
||||
t_pass
|
||||
@@ -1,40 +0,0 @@
|
||||
#
|
||||
# verify stale btree block reading
|
||||
#
|
||||
|
||||
t_require_commands touch stat setfattr getfattr createmany
|
||||
t_require_mounts 2
|
||||
|
||||
GETFATTR="getfattr --absolute-names"
|
||||
SETFATTR="setfattr"
|
||||
|
||||
#
|
||||
# This exercises the soft retry of btree blocks when
|
||||
# inconsistent cached versions are found. It ensures that basic hard
|
||||
# error returning turns into EIO in the case where the persistent reread
|
||||
# blocks and segments really are inconsistent.
|
||||
#
|
||||
# The triggers apply across all execution in the file system. So to
|
||||
# trigger btree block retries in the client we make sure that the server
|
||||
# is running on the other node.
|
||||
#
|
||||
|
||||
cl=$(t_first_client_nr)
|
||||
sv=$(t_server_nr)
|
||||
eval cl_dir="\$T_D${cl}"
|
||||
eval sv_dir="\$T_D${sv}"
|
||||
|
||||
echo "== create file for xattr ping pong"
|
||||
touch "$sv_dir/file"
|
||||
$SETFATTR -n user.xat -v initial "$sv_dir/file"
|
||||
$GETFATTR -n user.xat "$sv_dir/file" 2>&1 | t_filter_fs
|
||||
|
||||
echo "== retry btree block read"
|
||||
$SETFATTR -n user.xat -v btree "$sv_dir/file"
|
||||
t_trigger_arm btree_stale_read $cl
|
||||
old=$(t_counter btree_stale_read $cl)
|
||||
$GETFATTR -n user.xat "$cl_dir/file" 2>&1 | t_filter_fs
|
||||
t_trigger_show btree_stale_read "after" $cl
|
||||
t_counter_diff btree_stale_read $old $cl
|
||||
|
||||
t_pass
|
||||
@@ -37,17 +37,25 @@ t_quiet make
|
||||
t_quiet sync
|
||||
# pwd stays in xfstests dir to build config and run
|
||||
|
||||
#
|
||||
# Each filesystem needs specific mkfs and mount options because we put
|
||||
# quorum member addresess in mkfs options and the metadata device in
|
||||
# mount options.
|
||||
#
|
||||
cat << EOF > local.config
|
||||
export FSTYP=scoutfs
|
||||
export MKFS_OPTIONS="-Q 1"
|
||||
export MKFS_OPTIONS="-f"
|
||||
export MKFS_TEST_OPTIONS="-Q 0,127.0.0.1,42000"
|
||||
export MKFS_SCRATCH_OPTIONS="-Q 0,127.0.0.1,43000"
|
||||
export MKFS_DEV_OPTIONS="-Q 0,127.0.0.1,44000"
|
||||
export TEST_DEV=$T_DB0
|
||||
export TEST_DIR=$T_M0
|
||||
export SCRATCH_META_DEV=$T_EX_META_DEV
|
||||
export SCRATCH_DEV=$T_EX_DATA_DEV
|
||||
export SCRATCH_MNT="$T_TMPDIR/mnt.scratch"
|
||||
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_EX_META_DEV"
|
||||
export MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
|
||||
export TEST_FS_MOUNT_OPTS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
|
||||
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_EX_META_DEV"
|
||||
export MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
|
||||
export TEST_FS_MOUNT_OPTS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
|
||||
EOF
|
||||
|
||||
cat << EOF > local.exclude
|
||||
@@ -83,7 +91,7 @@ generic/375 # utils output change? update branch?
|
||||
EOF
|
||||
|
||||
t_restore_output
|
||||
echo "(showing output of xfstests)"
|
||||
echo " (showing output of xfstests)"
|
||||
|
||||
args="-E local.exclude ${T_XFSTESTS_ARGS:--g quick}"
|
||||
./check $args
|
||||
|
||||
@@ -1,25 +1,12 @@
|
||||
#
|
||||
# The userspace utils and kernel module share definitions of physical
|
||||
# structures and ioctls. If we're in the repo we include the kmod
|
||||
# headers directly, and hash them directly to calculate the format hash.
|
||||
#
|
||||
# If we're creating a standalone tarball for distribution we copy the
|
||||
# headers out of the kmod dir into the tarball. And then when we're
|
||||
# building in that tarball we use the headers in src/ directly.
|
||||
#
|
||||
FMTIOC_H := format.h ioctl.h
|
||||
FMTIOC_DIST := $(addprefix src/,$(FMTIOC_H))
|
||||
FMTIOC_KMOD := $(addprefix ../kmod/src/,$(FMTIOC_H))
|
||||
|
||||
ifneq ($(wildcard $(firstword $(FMTIOC_KMOD))),)
|
||||
HASH_FILES := $(FMTIOC_KMOD)
|
||||
else
|
||||
HASH_FILES := $(FMTIOC_DIST)
|
||||
endif
|
||||
SCOUTFS_FORMAT_HASH := $(shell cat $(HASH_FILES) | md5sum | cut -b1-16)
|
||||
|
||||
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -g -msse4.2 \
|
||||
-Wpadded \
|
||||
-fno-strict-aliasing \
|
||||
-DSCOUTFS_FORMAT_HASH=0x$(SCOUTFS_FORMAT_HASH)LLU
|
||||
|
||||
@@ -47,7 +34,7 @@ endif
|
||||
|
||||
$(BIN): $(OBJ)
|
||||
$(QU) [BIN $@]
|
||||
$(VE)gcc -o $@ $^ -luuid -lm -lcrypto
|
||||
$(VE)gcc -o $@ $^ -luuid -lm -lcrypto -lblkid
|
||||
|
||||
%.o %.d: %.c Makefile sparse.sh
|
||||
$(QU) [CC $<]
|
||||
|
||||
@@ -21,21 +21,19 @@ contains the filesystem's metadata.
|
||||
.sp
|
||||
This option is required.
|
||||
.TP
|
||||
.B server_addr=<ipv4:port>
|
||||
The server_addr option indicates that this mount will participate in
|
||||
quorum election to try and run a server for all the mounts of its
|
||||
filesystem. The option specifies the local TCP IPv4 address that the
|
||||
mount's elected server will listen on for connections from all other
|
||||
mounts of the filesystem.
|
||||
.B quorum_slot_nr=<number>
|
||||
The quorum_slot_nr option assigns a quorum member slot to the mount.
|
||||
The mount will use the slot assignment to claim exclusive ownership of
|
||||
the slot's configured address and an associated metadata device block.
|
||||
Each slot number must be used by only one mount at any given time.
|
||||
.sp
|
||||
The IPv4 address must be specified as a dotted quad, name resolution is
|
||||
not supported. A specific port may be provided after a separating
|
||||
colon. If no port is specified then a random port will be chosen. The
|
||||
address will be used for the lifetime of the mount and can not be
|
||||
changed. The mount must be unmounted to specify a different address.
|
||||
When a mount is assigned a quorum slot it becomes a quorum member and
|
||||
will participate in the raft leader election process and could start
|
||||
the server for the filesystem if it is elected leader.
|
||||
.sp
|
||||
If server_addr is not specified then the mount will read the filesystem
|
||||
until it sees the address of an elected server to connect to.
|
||||
The assigned number must match one of the slots defined with \-Q options
|
||||
when the filesystem was created with mkfs. If the number assigned
|
||||
doesn't match a number created during mkfs then the mount will fail.
|
||||
.SH FURTHER READING
|
||||
A
|
||||
.B scoutfs
|
||||
|
||||
@@ -3,51 +3,304 @@
|
||||
scoutfs \- scoutfs management utility
|
||||
.SH DESCRIPTION
|
||||
The
|
||||
.b
|
||||
scoutfs
|
||||
utility provides commands to manage a scoutfs filesystem.
|
||||
.B scoutfs
|
||||
utility provides commands to create and manage a ScoutFS filesystem.
|
||||
.SH COMMANDS
|
||||
|
||||
Note: Commands taking the
|
||||
.B --path
|
||||
option will, when the option is omitted, fall back to using the value of the
|
||||
.I SCOUTFS_MOUNT_PATH
|
||||
environment variable. If that variable is also absent the current working
|
||||
directory will be used.
|
||||
|
||||
.TP
|
||||
.BI "counters [\-t\] <sysfs topdir>"
|
||||
.BI "df [-h|--human-readable] [-p|--path PATH]"
|
||||
.sp
|
||||
Displays the counters and their values for a mounted scoutfs filesystem.
|
||||
Each counter and its value are printed on a line to stdout with
|
||||
sufficient spaces separating the name and value to align the values
|
||||
after
|
||||
Display available and used space on the ScoutFS data and metadata devices.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "\-t"
|
||||
Format the counters into a table that fills the display instead of
|
||||
printing one counter per line. The names and values are padded to
|
||||
create columns that fill the current width of the terminal.
|
||||
.B "-h, --human-readable"
|
||||
Output sizes in human-readable size units (e.g. 500G, 1.2P) rather than number
|
||||
of ScoutFS allocation blocks.
|
||||
.TP
|
||||
.B "sysfs topdir"
|
||||
Specify the mount's sysfs directory in which to find the
|
||||
.B counters/
|
||||
directory when then contains files for each counter.
|
||||
The sysfs directory is typically
|
||||
of the form
|
||||
.I /sys/fs/scoutfs/f.<fsid>.r.<rid>/
|
||||
\&.
|
||||
.B "-p, --path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "data-waiting <ino> <iblock> <path>"
|
||||
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
|
||||
.sp
|
||||
Displays all the files and blocks for which there is a task blocked waiting on
|
||||
Initialize a new ScoutFS filesystem on the target devices. Since ScoutFS uses
|
||||
separate block devices for its metadata and data storage, two are required.
|
||||
.sp
|
||||
If
|
||||
.B --force
|
||||
option is not given, mkfs will check for existing filesystem signatures. It is
|
||||
recommended to use
|
||||
.B wipefs(8)
|
||||
to remove non-ScoutFS filesystem signatures before proceeding, and
|
||||
.B --force
|
||||
to overwrite a previous ScoutFS filesystem.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B META-DEVICE
|
||||
The path to the block device to be used for ScoutFS metadata. If possible, use
|
||||
a faster block device for the metadata device.
|
||||
.TP
|
||||
.B DATA-DEVICE
|
||||
The path to the block device to be used for ScoutFS file data. If possible, use
|
||||
a larger block device for the data device.
|
||||
.TP
|
||||
.B "-Q, --quorum-slot NR,ADDR,PORT"
|
||||
Each \-Q option configures a quorum slot. The NR specifies the number
|
||||
of the slot to configure which must be between 0 and 14. Each slot
|
||||
number must only be used once, but they can be used in any order and
|
||||
they need not be consecutive. This is to allow natural relationships
|
||||
between slot numbers and nodes which may have arbitrary numbering
|
||||
schemes. ADDR and PORT are the numerical IPv4 address and port which
|
||||
will be used as the UDP endpoint for leader elections and as the TCP
|
||||
listening address for server connections. The number of configured
|
||||
slots determines the size of the quorum of member mounts which must be
|
||||
present to start the server for the filesystem to operate. A simple
|
||||
majority is typically required, while one mount is sufficient if only
|
||||
one or two slots are configured. Until the majority quorum are present,
|
||||
all mounts will hang waiting for a server to connect to.
|
||||
.TP
|
||||
.B "-m, --max-meta-size SIZE"
|
||||
Limit the space used by ScoutFS on the metadata device to the
|
||||
given size, rather than using the entire block device. Size is given as
|
||||
an integer followed by a units digit: "K", "M", "G", "T", "P", to denote
|
||||
kibibytes, mebibytes, etc.
|
||||
.TP
|
||||
.B "-d, --max-data-size SIZE"
|
||||
Same as previous, but for limiting the size of the data device.
|
||||
.TP
|
||||
.B "-f, --force"
|
||||
Ignore presence of existing data on the data and metadata devices.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "stat FILE [-s|--single-field FIELD-NAME]"
|
||||
.sp
|
||||
Display ScoutFS-specific metadata fields for the given file.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "FILE"
|
||||
Path to the file.
|
||||
.TP
|
||||
.B "-s, --single-field FIELD-NAME"
|
||||
Only output a single field's value instead of the default: all the stats with
|
||||
one stat per line.
|
||||
.sp
|
||||
.TP
|
||||
.RE
|
||||
.PD
|
||||
The fields are:
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.B "meta_seq"
|
||||
The metadata change sequence. This changes each time the inode's metadata
|
||||
is changed.
|
||||
.TP
|
||||
.B "data_seq"
|
||||
The data change sequence. This changes each time the inode's data
|
||||
is changed.
|
||||
.TP
|
||||
.B "data_version"
|
||||
The data version changes every time the contents of the file changes,
|
||||
or the file grows or shrinks.
|
||||
.TP
|
||||
.B "online_blocks"
|
||||
The number of 4Kb data blocks that contain data and can be read.
|
||||
.TP
|
||||
.B "offline_blocks"
|
||||
The number of 4Kb data blocks that are offline and would need to be
|
||||
staged to be read.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "statfs [-s|--single-field FIELD-NAME] [-p|--path PATH]"
|
||||
.sp
|
||||
Display ScoutFS-specific filesystem-wide metadata fields.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-s, --single-field FIELD-NAME"
|
||||
Only output a single stat instead of all the stats with one stat per
|
||||
line. The possible stat names are those given in the output.
|
||||
.TP
|
||||
.B "-p, --path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.sp
|
||||
.TP
|
||||
.RE
|
||||
.PD
|
||||
The fields are:
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.B "fsid"
|
||||
The unique 64bit filesystem identifier for this filesystem.
|
||||
.TP
|
||||
.B "rid"
|
||||
The unique 64bit random identifier for this mount of the filesystem.
|
||||
This is generated for every new mount of the file system.
|
||||
.TP
|
||||
.B "committed_seq"
|
||||
All seqs up to and including this seq have been
|
||||
committed. Can be compared with meta_seq and data_seq from inodes in
|
||||
.B stat
|
||||
to discover if changes to a file have been committed to disk.
|
||||
.TP
|
||||
.B "total_meta_blocks"
|
||||
The total number of 64K metadata blocks in the filesystem.
|
||||
.TP
|
||||
.B "total_data_blocks"
|
||||
The total number of 4K data blocks in the filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "counters [-t|--table] SYSFS-DIR"
|
||||
.sp
|
||||
Display the counters and their values for a mounted ScoutFS filesystem.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B SYSFS-DIR
|
||||
The mount's sysfs directory in which to find the
|
||||
.B counters/
|
||||
directory which then contains files for each counter.
|
||||
The sysfs directory is
|
||||
of the form
|
||||
.I /sys/fs/scoutfs/f.<fsid>.r.<rid>/
|
||||
\&.
|
||||
.TP
|
||||
.B "-t, --table"
|
||||
Format the counters into a columnar table that fills the width of the display
|
||||
instead of printing one counter per line.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "search-xattrs XATTR-NAME [-p|--path PATH]"
|
||||
.sp
|
||||
Display the inode numbers of inodes in the filesystem which may have
|
||||
an extended attribute with the given name.
|
||||
.sp
|
||||
The results may contain false positives. The returned inode numbers
|
||||
should be checked to verify that the extended attribute is in fact
|
||||
present on the inode.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B XATTR-NAME
|
||||
The full name of the extended attribute to search for as
|
||||
described in the
|
||||
.BR xattr (7)
|
||||
manual page.
|
||||
.TP
|
||||
.B "-p|--path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "list-hidden-xattrs FILE"
|
||||
.sp
|
||||
Display extended attributes starting with the
|
||||
.BR scoutfs.
|
||||
prefix and containing the
|
||||
.BR hide.
|
||||
tag
|
||||
which makes them invisible to
|
||||
.BR listxattr (2) .
|
||||
The names of each attribute are output, one per line. Their order
|
||||
is not specified.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "FILE"
|
||||
The path to a file within a ScoutFS filesystem. File permissions must allow
|
||||
reading.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "walk-inodes {meta_seq|data_seq} FIRST-INODE LAST-INODE [-p|--path PATH]"
|
||||
.sp
|
||||
Walk an inode index in the file system and output the inode numbers
|
||||
that are found between the first and last positions in the index.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.BR meta_seq , data_seq
|
||||
Which index to walk.
|
||||
.TP
|
||||
.B "FIRST-INODE"
|
||||
An integer index value giving starting position of the index walk.
|
||||
.I 0
|
||||
is the first possible position.
|
||||
.TP
|
||||
.B "LAST-INODE"
|
||||
An integer index value giving the last position to include in the index walk.
|
||||
.I \-1
|
||||
can be given to indicate the last possible position.
|
||||
.TP
|
||||
.B "-p|--path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "ino-path INODE-NUM [-p|--path PATH]"
|
||||
.sp
|
||||
Display all paths that reference an inode number.
|
||||
.sp
|
||||
Ongoing filesystem changes, such as renaming a common parent of multiple paths,
|
||||
can cause displayed paths to be inconsistent.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B "INODE-NUM"
|
||||
The inode number of the target inode.
|
||||
.TP
|
||||
.B "-p|--path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "data-waiting {-I|--inode} INODE-NUM {-B|--block} BLOCK-NUM [-p|--path PATH]"
|
||||
.sp
|
||||
Display all the files and blocks for which there is a task blocked waiting on
|
||||
offline data.
|
||||
.sp
|
||||
The results are sorted by the file's inode number and the
|
||||
logical block offset that is being waited on.
|
||||
.sp
|
||||
Each line of output specifies a block in a file that has a task waiting
|
||||
Each line of output describes a block in a file that has a task waiting
|
||||
and is formatted as:
|
||||
.I "ino <nr> iblock <nr> ops [str]"
|
||||
\&. The ops string indicates blocked operations separated by commas and can
|
||||
include
|
||||
include
|
||||
.B read
|
||||
for a read operation,
|
||||
.B write
|
||||
@@ -58,156 +311,151 @@ for a truncate or extending write.
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B "ino"
|
||||
.B "-I, --inode INODE-NUM"
|
||||
Start iterating over waiting tasks from the given inode number.
|
||||
Specifying 0 will show all waiting tasks.
|
||||
Value of 0 will show all waiting tasks.
|
||||
.TP
|
||||
.B "iblock"
|
||||
.B "-B, --block BLOCK-NUM"
|
||||
Start iterating over waiting tasks from the given logical block number
|
||||
in the starting inode. Specifying 0 will show blocks in the first inode
|
||||
in the starting inode. Value of 0 will show blocks in the first inode
|
||||
and then continue to show all blocks with tasks waiting in all the
|
||||
remaining inodes.
|
||||
.TP
|
||||
.B "-p, --path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "data-wait-err {-I|--inode} INODE-NUM {-V|--version} VER-NUM {-F|--offset} OFF-NUM {-C|--count} COUNT {-O|--op} OP {-E|--err} ERR [-p|--path PATH]"
|
||||
.sp
|
||||
Return error from matching waiters.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B "-C, --count COUNT"
|
||||
Count.
|
||||
.TP
|
||||
.B "-E, --err ERR"
|
||||
Error.
|
||||
.TP
|
||||
.B "-F, --offset OFF-NUM"
|
||||
Offset. May be expressed in bytes, or with KMGTP (Kibi, Mibi, etc.) size
|
||||
suffixes.
|
||||
.TP
|
||||
.B "-I, --inode INODE-NUM"
|
||||
Inode number.
|
||||
.TP
|
||||
.B "-O, --op OP"
|
||||
Operation. One of: "read", "write", "change_size".
|
||||
.TP
|
||||
.B "-p, --path PATH"
|
||||
A path within a ScoutFS filesystem.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "stage ARCHIVE-FILE FILE {-V|--version} VERSION [-o, --offset OFF-NUM] [-l, --length LENGTH]"
|
||||
.sp
|
||||
.B Stage
|
||||
(i.e. return to online) the previously-offline contents of a file by copying a
|
||||
region from another file, the archive, and without updating regular inode
|
||||
metadata. Any operations that are blocked by the existence of an offline
|
||||
region will proceed once the region has been staged.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "ARCHIVE-FILE"
|
||||
The source file for the file contents being staged.
|
||||
.TP
|
||||
.B "FILE"
|
||||
The regular file whose contents will be staged.
|
||||
.TP
|
||||
.B "-V, --version VERSION"
|
||||
The data_version of the contents to be staged. It must match the
|
||||
current data_version of the file.
|
||||
.TP
|
||||
.B "-o, --offset OFF-NUM"
|
||||
The starting byte offset of the region to write. May be expressed in bytes, or with
|
||||
KMGTP (Kibi, Mibi, etc.) size suffixes. Default is 0.
|
||||
.TP
|
||||
.B "-l, --length LENGTH"
|
||||
Length of range (bytes or KMGTP units) of file to stage. Default is the file's
|
||||
total size.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "release FILE {-V|--version} VERSION [-o, --offset OFF-NUM] [-l, --length LENGTH]"
|
||||
.sp
|
||||
.B Release
|
||||
the given region of the file. That is, remove the region's backing data and
|
||||
leave an offline data region. Future attempts to read or write the offline
|
||||
region will block until the region is restored by a
|
||||
.B stage
|
||||
write. This is used by userspace archive managers to free data space in the
|
||||
ScoutFS filesystem once the file data has been archived.
|
||||
.sp
|
||||
Note: This only works on regular files with write permission. Releasing regions
|
||||
that are already offline or sparse, including regions extending past the end of
|
||||
the file, will silently succeed.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "path"
|
||||
A path to any inode in the target filesystem, typically the root
|
||||
directory.
|
||||
The path to the regular file whose region will be released.
|
||||
.TP
|
||||
.B "-V, --version VERSION"
|
||||
The data_version of the contents to be released. It must match the current
|
||||
data_version of the file. This ensures that a release operation is truncating
|
||||
the same version of the data that was archived. (Use the
|
||||
.BI "stat"
|
||||
subcommand to obtain data version for a file.)
|
||||
.TP
|
||||
.B "-o, --offset OFF-NUM"
|
||||
The starting byte offset of the region to write. May be expressed in bytes, or with
|
||||
KMGTP (Kibi, Mibi, etc.) size suffixes. Default is 0.
|
||||
.TP
|
||||
.B "-l, --length LENGTH"
|
||||
Length of range (bytes or KMGTP units) of file to stage. Default is the file's
|
||||
total size.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "find-xattrs <\-n\ name> <\-f path>"
|
||||
.BI "setattr FILE [-d, --data-version=VERSION [-s, --size=SIZE [-o, --offline]]] [-t, --ctime=TIMESPEC]"
|
||||
.sp
|
||||
Displays the inode numbers of inodes in the filesystem which may have
|
||||
an extended attribute with the given name.
|
||||
.sp
|
||||
The results may contain false positives. The returned inode numbers
|
||||
should be checked to verify that the extended attribute is in fact
|
||||
present on the inode.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-n name"
|
||||
Specifies the full name of the extended attribute to search for as
|
||||
described in the
|
||||
.BR xattr (7)
|
||||
manual page.
|
||||
.TP
|
||||
.B "-f path"
|
||||
Specifies the path to any inode in the filesystem to search.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "ino-path <ino> <path>"
|
||||
.sp
|
||||
Displays all the paths to links to the given inode number.
|
||||
.sp
|
||||
All the relative paths from the root directory to each link of the
|
||||
target inode are output, one result per line. Each output path is
|
||||
guaranteed to have been a valid path to a link at some point in the
|
||||
past. An individual path won't be corrupted by a rename that occurs
|
||||
during the search. The set of paths can be modified while the search is
|
||||
running. A rename of a parent directory of all the paths, for example,
|
||||
can result in output where the parent directory name component changes
|
||||
in the middle of outputting all the paths.
|
||||
Set ScoutFS-specific attributes on a newly created zero-length file.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B "ino"
|
||||
The inode number of the target inode to resolve.
|
||||
.B "-V, --data-version=VERSION"
|
||||
Set data version.
|
||||
.TP
|
||||
.B "path"
|
||||
A path to any inode in the target filesystem, typically the root
|
||||
directory.
|
||||
.B "-o, --offline"
|
||||
Set file contents as offline, not sparse. Requires
|
||||
.I --size
|
||||
option also be present.
|
||||
.TP
|
||||
.B "-s, --size=SIZE"
|
||||
Set file size. May be expressed in bytes, or with
|
||||
KMGTP (Kibi, Mibi, etc.) size suffixes. Requires
|
||||
.I --data-version
|
||||
option also be present.
|
||||
.TP
|
||||
.B "-t, --ctime=TIMESPEC"
|
||||
Set creation time using
|
||||
.I "<seconds-since-epoch>.<nanoseconds>"
|
||||
format.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "listxattr-hidden <\-f path>"
|
||||
.sp
|
||||
Displays all the extended attributes starting with the
|
||||
.BR scoutfs.
|
||||
prefix and which contain the
|
||||
.BR hide.
|
||||
tag
|
||||
which makes them invisible to
|
||||
.BR listxattr (2)
|
||||
\&.
|
||||
The names of each attribute are output, one name per line. Their order
|
||||
is determined by internal indexing implementation details and should not
|
||||
be relied on.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-f path"
|
||||
The path to the file whose extended attributes will be listed. The
|
||||
user must have read permission to the inode.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "mkfs <\-Q nr> <meta_dev_path> <data_dev_path> [-M meta_size] [-D data_size]"
|
||||
.sp
|
||||
Initialize a new empty filesystem in the target devices by writing empty
|
||||
structures and a new superblock. Since ScoutFS uses separate block
|
||||
devices for its metadata and data storage, both must be given.
|
||||
.sp
|
||||
This
|
||||
.B unconditionally destroys
|
||||
the contents of the devices, regardless of what they contain or who may be
|
||||
using them. It simply writes new data structures into known offsets.
|
||||
.B Be very careful that the devices do not contain data and are not actively in use.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-Q nr"
|
||||
Specify the number of mounts needed to reach quorum and elect a mount
|
||||
to start the server. Mounts of the filesystem will hang until this many
|
||||
mounts are operational and can elect a server amongst themselves.
|
||||
.sp
|
||||
Mounts with the
|
||||
.B server_addr
|
||||
mount option participate in quorum. The safest quorum number is the
|
||||
smallest majority of an odd number of participating mounts. For
|
||||
example,
|
||||
two out of three total mounts. This ensures that there can only be one
|
||||
set of mounts that can establish quorum.
|
||||
.sp
|
||||
Degenerate quorums are possible, for example by specifying half of an
|
||||
even number of mounts or less than half of the mount count, down to even
|
||||
just one mount establishing quorum. These minority quorums carry the
|
||||
risk of multiple quorums being established concurrently. Each quorum's
|
||||
elected servers race to fence each other and can have the unlikely
|
||||
outcome of continually racing to fence each other resulting in a
|
||||
persistent loss of service.
|
||||
.TP
|
||||
.B "meta_dev_path"
|
||||
The path to the device to be used for ScoutFS metadata. If possible,
|
||||
use a faster block device for the metadata device. Its contents will be
|
||||
unconditionally destroyed.
|
||||
.TP
|
||||
.B "data_dev_path"
|
||||
The path to the device to be used for ScoutFS file data. If possible,
|
||||
use a larger block device for the data device. Its contents will be
|
||||
unconditionally destroyed.
|
||||
.TP
|
||||
.B "-M meta_size"
|
||||
Limit the space used by the filesystem on the metadata device to the
|
||||
given size, rather than using the entire block device. Size is given as
|
||||
an integer followed by a units digit: "K", "M", "G", "T", "P", to denote
|
||||
kibibytes, mebibytes, etc.
|
||||
.TP
|
||||
.B "-D data_size"
|
||||
Same as previous, but for limiting the size of the data device.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "print <path>"
|
||||
.BI "print META-DEVICE"
|
||||
.sp
|
||||
Prints out all of the metadata in the file system. This makes no effort
|
||||
to ensure that the structures are consistent as they're traversed and
|
||||
@@ -217,236 +465,21 @@ output.
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "path"
|
||||
The path to the metadata device for filesystem whose metadata will
|
||||
be printed. The command reads from the buffer cache of the device which
|
||||
may not reflect the current blocks in the filesystem that may have been
|
||||
written through another host or device. The local device's cache can be
|
||||
manually flushed before printing, perhaps with the
|
||||
.B \--flushbufs
|
||||
command in the
|
||||
.BR blockdev (8)
|
||||
command.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "release <path> <vers> <4KB block offset> <4KB block count>"
|
||||
.sp
|
||||
.B Release
|
||||
the given logical block region of the file. That is, truncate away
|
||||
any data blocks but leave behind offline data regions and do not change
|
||||
the main inode metadata. Future attempts to read or write the block
|
||||
region
|
||||
will block until the region is restored by a
|
||||
.B stage
|
||||
write. This is used by userspace archive managers to store file data
|
||||
in a remote archive tier.
|
||||
.sp
|
||||
This only works on regular files and with write permission. Releasing
|
||||
regions that are already offline or are sparse, including past the end
|
||||
of the file, silently succeed.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "path"
|
||||
The path to the regular file whose region will be released.
|
||||
.TP
|
||||
.B "version"
|
||||
The current data version of the contents of the file. This ensures
|
||||
that a release operation is truncating the version of the data that it
|
||||
expects. It can't throw away data that was newly written while it was
|
||||
performing its release operation. An inode's data_version is read
|
||||
by the SCOUTFS_IOC_STATFS_MORE
|
||||
ioctl.
|
||||
.TP
|
||||
.B "4KB block offset"
|
||||
The 64bit logical block offset of the start of the region in units of 4KB.
|
||||
.TP
|
||||
.B "4KB block count"
|
||||
The 64bit length of the region to release in units of 4KB blocks.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "setattr <\-c ctime> <\-d data_version> -o <\-s i_size> <\-f path>
|
||||
.sp
|
||||
Set scoutfs specific metadata on a newly created inode without updating
|
||||
other inode metadata.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-c ctime"
|
||||
Specify the inode's creation GMT timespec with 64bit seconds and 32bit
|
||||
nanoseconds formatted as
|
||||
.B sec.nsec
|
||||
\&.
|
||||
.TP
|
||||
.B "-d data_version"
|
||||
Specify the inode's data version. This can only be set on regular files whose
|
||||
current data_version is 0.
|
||||
.TP
|
||||
.B "-o"
|
||||
Create an offline region for all of the file's data up to the specified
|
||||
file size. This can only be set on regular files whose data_version is
|
||||
0 and i_size must also be specified.
|
||||
.TP
|
||||
.B "-s i_size"
|
||||
Set the inode's i_size. This can only be set on regular files whose
|
||||
data_version is 0.
|
||||
.TP
|
||||
.B "-f path"
|
||||
The file whose metadata will be set.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "stage <file> <vers> <offset> <count> <archive file>"
|
||||
.sp
|
||||
.B Stage
|
||||
the contents of the file by reading a region of another archive file and writing it
|
||||
into the file region without updating regular inode metadata. Any tasks
|
||||
that are blocked by the offline region will proceed once it has been
|
||||
staged.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "file"
|
||||
The regular file whose contents will be staged.
|
||||
.TP
|
||||
.B "vers"
|
||||
The data_version of the contents to be staged. It must match the
|
||||
current data_version of the file.
|
||||
.TP
|
||||
.B "offset"
|
||||
The starting byte offset of the region to write. This must be aligned
|
||||
to 4KB blocks.
|
||||
.TP
|
||||
.B "count"
|
||||
The length of the region to write in bytes. A length of 0 is a noop
|
||||
and will immediately return success. The length must be a multiple
|
||||
of 4KB blocks unless it is writing the final partial block in which
|
||||
case it must end at i_size.
|
||||
.TP
|
||||
.B "archive file"
|
||||
A file whose contents will be read and written as the staged region.
|
||||
The start of the archive file will be used as the start of the region.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "stat [-s single] <path>"
|
||||
.sp
|
||||
Display scoutfs metadata fields for the given inode.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-s single"
|
||||
Only ontput a single stat instead of all the stats with one stat per
|
||||
line. The possible stat names are those given in the output.
|
||||
.TP
|
||||
.B "path"
|
||||
The path to the file whose inode field will be output.
|
||||
.sp
|
||||
.TP
|
||||
.RE
|
||||
.PD
|
||||
The fields are as follows:
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.B "meta_seq"
|
||||
The metadata change sequence. This changes each time the inode's metadata
|
||||
is changed during a mount's transaction.
|
||||
.TP
|
||||
.B "data_seq"
|
||||
The data change sequence. This changes each time the inode's data
|
||||
is changed during a mount's transaction.
|
||||
.TP
|
||||
.B "data_version"
|
||||
The data version changes every time any contents of the file changes,
|
||||
including size changes. It can change many times during a syscall in a
|
||||
transactions.
|
||||
.TP
|
||||
.B "online_blocks"
|
||||
The number of 4Kb data blocks that contain data and can be read.
|
||||
.TP
|
||||
.B "online_blocks"
|
||||
The number of 4Kb data blocks that are offline and would need to be
|
||||
staged to be read.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "statfs [-s single] <path>"
|
||||
.sp
|
||||
Display scoutfs metadata fields for a scoutfs filesystem.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.sp
|
||||
.B "-s single"
|
||||
Only ontput a single stat instead of all the stats with one stat per
|
||||
line. The possible stat names are those given in the output.
|
||||
.TP
|
||||
.B "path"
|
||||
The path to any inode in the filesystem.
|
||||
.sp
|
||||
.TP
|
||||
.RE
|
||||
.PD
|
||||
The fields are as follows:
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.TP
|
||||
.B "fsid"
|
||||
The unique 64bit filesystem identifier for this filesystem.
|
||||
.TP
|
||||
.B "rid"
|
||||
The unique 64bit random identifier for this mount of the filesystem.
|
||||
This is generated for every new mount of the file system.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.TP
|
||||
.BI "walk-inodes <index> <first> <last> <path>"
|
||||
.sp
|
||||
Walks an inode index in the file system and outputs the inode numbers
|
||||
that are found within the first and last positions in the index.
|
||||
.RS 1.0i
|
||||
.PD 0
|
||||
.sp
|
||||
.TP
|
||||
.B "index"
|
||||
Specifies the index to walk. The currently supported indices are
|
||||
.B meta_seq
|
||||
and
|
||||
.B data_seq
|
||||
\&.
|
||||
.TP
|
||||
.B "first"
|
||||
The starting position of the index walk.
|
||||
.I 0
|
||||
is the first possible position in every index.
|
||||
.TP
|
||||
.B "last"
|
||||
The last position to include in the index walk.
|
||||
.I \-1
|
||||
can be given as shorthand for the U64_MAX last possible position in
|
||||
every index.
|
||||
.TP
|
||||
.B "path"
|
||||
A path to any inode in the filesystem, typically the root directory.
|
||||
.B "META-DEVICE"
|
||||
The path to the metadata device for the filesystem whose metadata will be
|
||||
printed. Since this command reads via the host's buffer cache, it may not
|
||||
reflect the current blocks in the filesystem possibly written to the shared
|
||||
block devices from another host, unless
|
||||
.B blockdev \--flushbufs
|
||||
command is used first.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
.SH SEE ALSO
|
||||
.BR scoutfs (5),
|
||||
.BR xattr (7).
|
||||
.BR xattr (7),
|
||||
.BR blockdev (8),
|
||||
.BR wipefs (8)
|
||||
|
||||
.SH AUTHORS
|
||||
Zach Brown <zab@versity.com>
|
||||
|
||||
@@ -16,6 +16,7 @@ BuildRequires: git
|
||||
BuildRequires: gzip
|
||||
BuildRequires: libuuid-devel
|
||||
BuildRequires: openssl-devel
|
||||
BuildRequires: libblkid-devel
|
||||
|
||||
#Requires: kmod-scoutfs = %{version}
|
||||
|
||||
|
||||
94
utils/src/blkid.c
Normal file
94
utils/src/blkid.c
Normal file
@@ -0,0 +1,94 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <blkid/blkid.h>
|
||||
|
||||
#include "util.h"
|
||||
#include "format.h"
|
||||
#include "blkid.h"
|
||||
|
||||
static int check_bdev_blkid(int fd, char *devname, char *usage)
|
||||
{
|
||||
blkid_probe pr;
|
||||
int ret = 0;
|
||||
|
||||
pr = blkid_new_probe_from_filename(devname);
|
||||
if (!pr) {
|
||||
fprintf(stderr, "%s: failed to create a new libblkid probe\n", devname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* enable partitions probing (superblocks are enabled by default) */
|
||||
ret = blkid_probe_enable_partitions(pr, true);
|
||||
if (ret == -1) {
|
||||
fprintf(stderr, "%s: blkid_probe_enable_partitions() failed\n", devname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = blkid_do_fullprobe(pr);
|
||||
if (ret == -1) {
|
||||
fprintf(stderr, "%s: blkid_do_fullprobe() failed", devname);
|
||||
goto out;
|
||||
} else if (ret == 0) {
|
||||
const char *type;
|
||||
|
||||
if (!blkid_probe_lookup_value(pr, "TYPE", &type, NULL)) {
|
||||
fprintf(stderr, "%s: appears to contain an existing "
|
||||
"%s superblock\n", devname, type);
|
||||
ret = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!blkid_probe_lookup_value(pr, "PTTYPE", &type, NULL)) {
|
||||
fprintf(stderr, "%s: appears to contain a partition "
|
||||
"table (%s)\n", devname, type);
|
||||
ret = -1;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
/* return 0 if ok */
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
blkid_free_probe(pr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_bdev_scoutfs(int fd, char *devname, char *usage)
|
||||
{
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
int ret;
|
||||
|
||||
ret = read_block(fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT, (void **)&super);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (le32_to_cpu(super->hdr.magic) == SCOUTFS_BLOCK_MAGIC_SUPER) {
|
||||
fprintf(stderr, "%s: appears to contain an existing "
|
||||
"ScoutFS superblock\n", devname);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
free(super);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Returns -1 on error, 0 otherwise.
|
||||
*/
|
||||
int check_bdev(int fd, char *devname, char *usage)
|
||||
{
|
||||
return check_bdev_blkid(fd, devname, usage) ?:
|
||||
/* Our sig is not in blkid (yet) so check explicitly for us. */
|
||||
check_bdev_scoutfs(fd, devname, usage);
|
||||
}
|
||||
6
utils/src/blkid.h
Normal file
6
utils/src/blkid.h
Normal file
@@ -0,0 +1,6 @@
|
||||
#ifndef _BLKID_H_
|
||||
#define _BLKID_H_
|
||||
|
||||
int check_bdev(int fd, char *path, char *usage);
|
||||
|
||||
#endif
|
||||
@@ -25,17 +25,13 @@ static void init_block(struct scoutfs_btree_block *bt, int level)
|
||||
*/
|
||||
void btree_init_root_single(struct scoutfs_btree_root *root,
|
||||
struct scoutfs_btree_block *bt,
|
||||
u64 blkno, u64 seq, __le64 fsid)
|
||||
u64 seq, u64 blkno)
|
||||
{
|
||||
root->ref.blkno = cpu_to_le64(blkno);
|
||||
root->ref.seq = cpu_to_le64(1);
|
||||
root->ref.seq = cpu_to_le64(seq);
|
||||
root->height = 1;
|
||||
|
||||
memset(bt, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
|
||||
bt->hdr.fsid = fsid;
|
||||
bt->hdr.blkno = cpu_to_le64(blkno);
|
||||
bt->hdr.seq = cpu_to_le64(1);
|
||||
|
||||
init_block(bt, 0);
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
void btree_init_root_single(struct scoutfs_btree_root *root,
|
||||
struct scoutfs_btree_block *bt,
|
||||
u64 blkno, u64 seq, __le64 fsid);
|
||||
u64 seq, u64 blkno);
|
||||
|
||||
void btree_append_item(struct scoutfs_btree_block *bt,
|
||||
struct scoutfs_key *key, void *val, int val_len);
|
||||
|
||||
@@ -4,35 +4,37 @@
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "cmd.h"
|
||||
#include "util.h"
|
||||
|
||||
static struct command {
|
||||
static struct argp_command {
|
||||
char *name;
|
||||
char *opts;
|
||||
char *summary;
|
||||
struct argp *argp;
|
||||
int group;
|
||||
int (*func)(int argc, char **argv);
|
||||
} cmds[100], *next_cmd = cmds;
|
||||
} argp_cmds[100], *next_argp_cmd = argp_cmds;
|
||||
|
||||
#define cmd_for_each(com) for (com = cmds; com->func; com++)
|
||||
#define cmd_for_each(com) for (com = argp_cmds; com->func; com++)
|
||||
|
||||
void cmd_register(char *name, char *opts, char *summary,
|
||||
void cmd_register_argp(char *name, struct argp *argp, int group,
|
||||
int (*func)(int argc, char **argv))
|
||||
{
|
||||
struct command *com = next_cmd++;
|
||||
struct argp_command *com = next_argp_cmd++;
|
||||
|
||||
assert((com - cmds) < array_size(cmds));
|
||||
assert((com - argp_cmds) < array_size(argp_cmds));
|
||||
|
||||
com->name = name;
|
||||
com->opts = opts;
|
||||
com->summary = summary;
|
||||
com->argp = argp;
|
||||
com->group = group;
|
||||
com->func = func;
|
||||
}
|
||||
|
||||
static struct command *find_command(char *name)
|
||||
|
||||
static struct argp_command *find_command(char *name)
|
||||
{
|
||||
struct command *com;
|
||||
struct argp_command *com;
|
||||
|
||||
cmd_for_each(com) {
|
||||
if (!strcmp(name, com->name))
|
||||
@@ -42,28 +44,47 @@ static struct command *find_command(char *name)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void usage(void)
|
||||
static void print_cmds_for_group(int group)
|
||||
{
|
||||
struct command *com;
|
||||
struct argp_command *com;
|
||||
int largest = 0;
|
||||
|
||||
fprintf(stderr, "usage: scoutfs <command> [<args>]\n"
|
||||
"Commands:\n");
|
||||
|
||||
/* Base alignment on all groups */
|
||||
cmd_for_each(com)
|
||||
largest = max(strlen(com->name), largest);
|
||||
|
||||
cmd_for_each(com) {
|
||||
fprintf(stderr, " %*s %s\n %*s %s\n",
|
||||
largest, com->name, com->opts,
|
||||
largest, "", com->summary);
|
||||
if (com->group == group) {
|
||||
fprintf(stderr, " %*s %s\n %*s %s\n",
|
||||
largest, com->name, com->argp->args_doc,
|
||||
largest, "", com->argp->doc);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void usage(void)
|
||||
{
|
||||
fprintf(stderr, "usage: scoutfs <command> [<args>]\n\n");
|
||||
fprintf(stderr, "Selected fs defaults to current working directory.\n");
|
||||
fprintf(stderr, "See <command> --help for more details.\n");
|
||||
|
||||
fprintf(stderr, "\nCore admin:\n");
|
||||
print_cmds_for_group(GROUP_CORE);
|
||||
fprintf(stderr, "\nAdditional Information:\n");
|
||||
print_cmds_for_group(GROUP_INFO);
|
||||
fprintf(stderr, "\nSearch Acceleration:\n");
|
||||
print_cmds_for_group(GROUP_SEARCH);
|
||||
fprintf(stderr, "\nArchival Agent Support:\n");
|
||||
print_cmds_for_group(GROUP_AGENT);
|
||||
fprintf(stderr, "\nDebugging commands:\n");
|
||||
print_cmds_for_group(GROUP_DEBUG);
|
||||
}
|
||||
|
||||
/* this returns a positive unix return code on error for some reason */
|
||||
char cmd_execute(int argc, char **argv)
|
||||
{
|
||||
struct command *com = NULL;
|
||||
struct argp_command *com = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc > 1) {
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
#ifndef _CMD_H_
|
||||
#define _CMD_H_
|
||||
|
||||
void cmd_register(char *name, char *opts, char *summary,
|
||||
#define GROUP_CORE 0
|
||||
#define GROUP_INFO 1
|
||||
#define GROUP_SEARCH 2
|
||||
#define GROUP_AGENT 3
|
||||
#define GROUP_DEBUG 4
|
||||
|
||||
void cmd_register_argp(char *name, struct argp *argp, int group,
|
||||
int (*func)(int argc, char **argv));
|
||||
|
||||
char cmd_execute(int argc, char **argv);
|
||||
|
||||
@@ -12,7 +12,10 @@
|
||||
#include <dirent.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <stdbool.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "parse.h"
|
||||
#include "util.h"
|
||||
#include "cmd.h"
|
||||
|
||||
@@ -37,7 +40,12 @@ static int cmp_counter_names(const void *A, const void *B)
|
||||
return strcmp(a->name, b->name);
|
||||
}
|
||||
|
||||
static int counters_cmd(int argc, char **argv)
|
||||
struct counters_args {
|
||||
char *sysfs_path;
|
||||
bool tabular;
|
||||
};
|
||||
|
||||
static int do_counters(struct counters_args *args)
|
||||
{
|
||||
unsigned int *name_wid = NULL;
|
||||
unsigned int *val_wid = NULL;
|
||||
@@ -50,9 +58,7 @@ static int counters_cmd(int argc, char **argv)
|
||||
unsigned int rows = 0;
|
||||
unsigned int cols = 0;
|
||||
unsigned int nr = 0;
|
||||
char *dir_arg = NULL;
|
||||
struct dirent *dent;
|
||||
bool table = false;
|
||||
struct winsize ws;
|
||||
DIR *dirp = NULL;
|
||||
int dir_fd = -1;
|
||||
@@ -64,28 +70,16 @@ static int counters_cmd(int argc, char **argv)
|
||||
int r;
|
||||
int c;
|
||||
|
||||
for (i = 1; i < argc; i++) {
|
||||
if (strcmp(argv[i], "-t") == 0)
|
||||
table = true;
|
||||
else
|
||||
dir_arg = argv[i];
|
||||
}
|
||||
|
||||
ret = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
|
||||
if (ret < 0)
|
||||
ret = ioctl(STDIN_FILENO, TIOCGWINSZ, &ws);
|
||||
if (ret < 0)
|
||||
table = false;
|
||||
args->tabular = false;
|
||||
|
||||
if (dir_arg == NULL) {
|
||||
printf("scoutfs counter-table: need mount sysfs dir (i.e. /sys/fs/scoutfs/$fr)\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = snprintf(path, PATH_MAX, "%s/counters", dir_arg);
|
||||
ret = snprintf(path, PATH_MAX, "%s/counters", args->sysfs_path);
|
||||
if (ret < 1 || ret >= PATH_MAX) {
|
||||
ret = -EINVAL;
|
||||
fprintf(stderr, "invalid counter dir path '%s'\n", dir_arg);
|
||||
fprintf(stderr, "invalid counter dir path '%s'\n", args->sysfs_path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -120,6 +114,7 @@ static int counters_cmd(int argc, char **argv)
|
||||
goto out;
|
||||
}
|
||||
memset(&ctrs[nr], 0, (alloced - nr) * sizeof(*ctrs));
|
||||
memset(&name_wid[nr], 0, (alloced - nr) * sizeof(*name_wid));
|
||||
}
|
||||
|
||||
ctr = &ctrs[nr];
|
||||
@@ -191,7 +186,7 @@ static int counters_cmd(int argc, char **argv)
|
||||
* one column of counters and use the max field widths from the
|
||||
* initial counter reads.
|
||||
*/
|
||||
if (table) {
|
||||
if (args->tabular) {
|
||||
min_rows = 1;
|
||||
cols = ws.ws_col / (name_wid[0] + 1 + val_wid[0] + 2);
|
||||
max_rows = nr / cols;
|
||||
@@ -276,9 +271,58 @@ out:
|
||||
return ret;
|
||||
};
|
||||
|
||||
static int parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct counters_args *args = state->input;
|
||||
|
||||
switch (key) {
|
||||
case 't':
|
||||
args->tabular = true;
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (!args->sysfs_path)
|
||||
args->sysfs_path = strdup_or_error(state, arg);
|
||||
else
|
||||
argp_error(state, "more than one argument given");
|
||||
break;
|
||||
case ARGP_KEY_FINI:
|
||||
if (!args->sysfs_path)
|
||||
argp_error(state, "no sysfs path argument given");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{ "table", 't', NULL, 0, "Output in table format" },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static struct argp argp = {
|
||||
options,
|
||||
parse_opt,
|
||||
"SYSFS-DIR",
|
||||
"Show counters for a mounted volume"
|
||||
};
|
||||
|
||||
static int counters_cmd(int argc, char *argv[])
|
||||
{
|
||||
struct counters_args counters_args = {NULL};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&argp, argc, argv, 0, NULL, &counters_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return do_counters(&counters_args);
|
||||
}
|
||||
|
||||
|
||||
static void __attribute__((constructor)) counters_ctor(void)
|
||||
{
|
||||
cmd_register("counters", "[-t] <sysfs dir>",
|
||||
"show [tablular] counters for a given mounted volume",
|
||||
counters_cmd);
|
||||
cmd_register_argp("counters", &argp, GROUP_INFO, counters_cmd);
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef _DEV_H_
|
||||
#define _DEV_H_
|
||||
|
||||
#define BASE_SIZE_FMT "%.2f %s"
|
||||
#define BASE_SIZE_FMT "%.2f%s"
|
||||
#define BASE_SIZE_ARGS(sz) size_flt(sz, 1), size_str(sz, 1)
|
||||
|
||||
#define SIZE_FMT "%llu (%.2f %s)"
|
||||
|
||||
106
utils/src/df.c
106
utils/src/df.c
@@ -7,20 +7,28 @@
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <getopt.h>
|
||||
#include <assert.h>
|
||||
#include <stdbool.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "parse.h"
|
||||
#include "util.h"
|
||||
#include "format.h"
|
||||
#include "ioctl.h"
|
||||
#include "cmd.h"
|
||||
#include "dev.h"
|
||||
|
||||
#define ROWS 3
|
||||
#define COLS 6
|
||||
#define CHARS 20
|
||||
|
||||
static int df_cmd(int argc, char **argv)
|
||||
struct df_args {
|
||||
char *path;
|
||||
bool human_readable;
|
||||
};
|
||||
|
||||
static int do_df(struct df_args *args)
|
||||
{
|
||||
struct scoutfs_ioctl_alloc_detail ad;
|
||||
struct scoutfs_ioctl_alloc_detail_entry *ade = NULL;
|
||||
@@ -36,18 +44,9 @@ static int df_cmd(int argc, char **argv)
|
||||
int r;
|
||||
int c;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "must specify path\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fd = open(argv[1], O_RDONLY);
|
||||
if (fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
argv[1], strerror(errno), errno);
|
||||
return ret;
|
||||
}
|
||||
fd = get_path(args->path, O_RDONLY);
|
||||
if (fd < 0)
|
||||
return fd;
|
||||
|
||||
sfm.valid_bytes = sizeof(struct scoutfs_ioctl_statfs_more);
|
||||
ret = ioctl(fd, SCOUTFS_IOC_STATFS_MORE, &sfm);
|
||||
@@ -96,18 +95,38 @@ static int df_cmd(int argc, char **argv)
|
||||
|
||||
snprintf(cells[1][0], CHARS, "MetaData");
|
||||
snprintf(cells[1][1], CHARS, "64KB");
|
||||
snprintf(cells[1][2], CHARS, "%llu", sfm.total_meta_blocks);
|
||||
snprintf(cells[1][3], CHARS, "%llu", sfm.total_meta_blocks - meta_free);
|
||||
snprintf(cells[1][4], CHARS, "%llu", meta_free);
|
||||
if (args->human_readable) {
|
||||
snprintf(cells[1][2], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS(sfm.total_meta_blocks * SCOUTFS_BLOCK_LG_SIZE));
|
||||
snprintf(cells[1][3], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS((sfm.total_meta_blocks - meta_free)
|
||||
* SCOUTFS_BLOCK_LG_SIZE));
|
||||
snprintf(cells[1][4], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS(meta_free * SCOUTFS_BLOCK_LG_SIZE));
|
||||
} else {
|
||||
snprintf(cells[1][2], CHARS, "%llu", sfm.total_meta_blocks);
|
||||
snprintf(cells[1][3], CHARS, "%llu", sfm.total_meta_blocks - meta_free);
|
||||
snprintf(cells[1][4], CHARS, "%llu", meta_free);
|
||||
}
|
||||
snprintf(cells[1][5], CHARS, "%llu",
|
||||
((sfm.total_meta_blocks - meta_free) * 100) /
|
||||
sfm.total_meta_blocks);
|
||||
|
||||
snprintf(cells[2][0], CHARS, "Data");
|
||||
snprintf(cells[2][1], CHARS, "4KB");
|
||||
snprintf(cells[2][2], CHARS, "%llu", sfm.total_data_blocks);
|
||||
snprintf(cells[2][3], CHARS, "%llu", sfm.total_data_blocks - data_free);
|
||||
snprintf(cells[2][4], CHARS, "%llu", data_free);
|
||||
if (args->human_readable) {
|
||||
snprintf(cells[2][2], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS(sfm.total_data_blocks * SCOUTFS_BLOCK_SM_SIZE));
|
||||
snprintf(cells[2][3], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS((sfm.total_data_blocks - data_free)
|
||||
* SCOUTFS_BLOCK_SM_SIZE));
|
||||
snprintf(cells[2][4], CHARS, BASE_SIZE_FMT,
|
||||
BASE_SIZE_ARGS(data_free * SCOUTFS_BLOCK_SM_SIZE));
|
||||
} else {
|
||||
snprintf(cells[2][2], CHARS, "%llu", sfm.total_data_blocks);
|
||||
snprintf(cells[2][3], CHARS, "%llu", sfm.total_data_blocks - data_free);
|
||||
snprintf(cells[2][4], CHARS, "%llu", data_free);
|
||||
}
|
||||
snprintf(cells[2][5], CHARS, "%llu",
|
||||
((sfm.total_data_blocks - data_free) * 100) /
|
||||
sfm.total_data_blocks);
|
||||
@@ -131,8 +150,51 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct df_args *args = state->input;
|
||||
|
||||
switch (key) {
|
||||
case 'p':
|
||||
args->path = strdup_or_error(state, arg);
|
||||
break;
|
||||
case 'h':
|
||||
args->human_readable = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{ "path", 'p', "PATH", 0, "Path to ScoutFS filesystem"},
|
||||
{ "human-readable", 'h', NULL, 0, "Print sizes in human readable format (e.g., 1KB 234MB 2GB)"},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static struct argp argp = {
|
||||
options,
|
||||
parse_opt,
|
||||
"",
|
||||
"Show metadata and data block usage"
|
||||
};
|
||||
|
||||
static int df_cmd(int argc, char **argv)
|
||||
{
|
||||
struct df_args df_args = {NULL};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&argp, argc, argv, 0, NULL, &df_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return do_df(&df_args);
|
||||
|
||||
}
|
||||
|
||||
static void __attribute__((constructor)) df_ctor(void)
|
||||
{
|
||||
cmd_register("df", "<path>",
|
||||
"show metadata and data block usage", df_cmd);
|
||||
cmd_register_argp("df", &argp, GROUP_CORE, df_cmd);
|
||||
}
|
||||
|
||||
@@ -8,44 +8,32 @@
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <limits.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "parse.h"
|
||||
#include "util.h"
|
||||
#include "format.h"
|
||||
#include "ioctl.h"
|
||||
#include "parse.h"
|
||||
#include "cmd.h"
|
||||
|
||||
static int ino_path_cmd(int argc, char **argv)
|
||||
struct ino_args {
|
||||
char *path;
|
||||
u64 ino;
|
||||
};
|
||||
|
||||
static int do_ino_path(struct ino_args *args)
|
||||
{
|
||||
struct scoutfs_ioctl_ino_path args;
|
||||
struct scoutfs_ioctl_ino_path ioctl_args;
|
||||
struct scoutfs_ioctl_ino_path_result *res;
|
||||
unsigned int result_bytes;
|
||||
char *endptr = NULL;
|
||||
u64 ino;
|
||||
int ret;
|
||||
int fd;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "must specify ino and path\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ino = strtoull(argv[1], &endptr, 0);
|
||||
if (*endptr != '\0' ||
|
||||
((ino == LLONG_MIN || ino == LLONG_MAX) && errno == ERANGE)) {
|
||||
fprintf(stderr, "error parsing inode number '%s'\n",
|
||||
argv[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
fd = open(argv[2], O_RDONLY);
|
||||
if (fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
argv[2], strerror(errno), errno);
|
||||
return ret;
|
||||
}
|
||||
fd = get_path(args->path, O_RDONLY);
|
||||
if (fd < 0)
|
||||
return fd;
|
||||
|
||||
result_bytes = offsetof(struct scoutfs_ioctl_ino_path_result,
|
||||
path[PATH_MAX]);
|
||||
@@ -57,13 +45,13 @@ static int ino_path_cmd(int argc, char **argv)
|
||||
goto out;
|
||||
}
|
||||
|
||||
args.ino = ino;
|
||||
args.dir_ino = 0;
|
||||
args.dir_pos = 0;
|
||||
args.result_ptr = (intptr_t)res;
|
||||
args.result_bytes = result_bytes;
|
||||
ioctl_args.ino = args->ino;
|
||||
ioctl_args.dir_ino = 0;
|
||||
ioctl_args.dir_pos = 0;
|
||||
ioctl_args.result_ptr = (intptr_t)res;
|
||||
ioctl_args.result_bytes = result_bytes;
|
||||
for (;;) {
|
||||
ret = ioctl(fd, SCOUTFS_IOC_INO_PATH, &args);
|
||||
ret = ioctl(fd, SCOUTFS_IOC_INO_PATH, &ioctl_args);
|
||||
if (ret < 0) {
|
||||
ret = -errno;
|
||||
if (ret == -ENOENT)
|
||||
@@ -73,10 +61,10 @@ static int ino_path_cmd(int argc, char **argv)
|
||||
|
||||
printf("%.*s\n", res->path_bytes, res->path);
|
||||
|
||||
args.dir_ino = res->dir_ino;
|
||||
args.dir_pos = res->dir_pos;
|
||||
if (++args.dir_pos == 0) {
|
||||
if (++args.dir_ino == 0)
|
||||
ioctl_args.dir_ino = res->dir_ino;
|
||||
ioctl_args.dir_pos = res->dir_pos;
|
||||
if (++ioctl_args.dir_pos == 0) {
|
||||
if (++ioctl_args.dir_ino == 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -92,8 +80,60 @@ out:
|
||||
return ret;
|
||||
};
|
||||
|
||||
static int parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct ino_args *args = state->input;
|
||||
int ret;
|
||||
|
||||
switch (key) {
|
||||
case 'p':
|
||||
args->path = strdup_or_error(state, arg);
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (args->ino)
|
||||
argp_error(state, "more than one argument given");
|
||||
ret = parse_u64(arg, &args->ino);
|
||||
if (ret)
|
||||
argp_error(state, "inode parse error");
|
||||
break;
|
||||
case ARGP_KEY_FINI:
|
||||
if (!args->ino) {
|
||||
argp_error(state, "must provide inode number");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{ "path", 'p', "PATH", 0, "Path to ScoutFS filesystem"},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static struct argp argp = {
|
||||
options,
|
||||
parse_opt,
|
||||
"INODE-NUM",
|
||||
"Print paths that refer to inode number"
|
||||
};
|
||||
|
||||
static int ino_path_cmd(int argc, char **argv)
|
||||
{
|
||||
struct ino_args ino_args = {NULL};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&argp, argc, argv, 0, NULL, &ino_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return do_ino_path(&ino_args);
|
||||
}
|
||||
|
||||
|
||||
static void __attribute__((constructor)) ino_path_ctor(void)
|
||||
{
|
||||
cmd_register("ino-path", "<ino> <path>",
|
||||
"print paths that refer to inode #", ino_path_cmd);
|
||||
cmd_register_argp("ino-path", &argp, GROUP_SEARCH, ino_path_cmd);
|
||||
}
|
||||
|
||||
@@ -7,56 +7,31 @@
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <getopt.h>
|
||||
#include <ctype.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "parse.h"
|
||||
#include "util.h"
|
||||
#include "format.h"
|
||||
#include "ioctl.h"
|
||||
#include "cmd.h"
|
||||
|
||||
static struct option long_ops[] = {
|
||||
{ "file", 1, NULL, 'f' },
|
||||
{ NULL, 0, NULL, 0}
|
||||
struct list_hidden_xattr_args {
|
||||
char *filename;
|
||||
};
|
||||
|
||||
static int listxattr_hidden_cmd(int argc, char **argv)
|
||||
static int do_list_hidden_xattrs(struct list_hidden_xattr_args *args)
|
||||
{
|
||||
struct scoutfs_ioctl_listxattr_hidden lxh;
|
||||
char *path = NULL;
|
||||
char *buf = NULL;
|
||||
char *name;
|
||||
int fd = -1;
|
||||
int bytes;
|
||||
int len;
|
||||
int ret;
|
||||
int c;
|
||||
int i;
|
||||
|
||||
while ((c = getopt_long(argc, argv, "f:", long_ops, NULL)) != -1) {
|
||||
switch (c) {
|
||||
case 'f':
|
||||
path = strdup(optarg);
|
||||
if (!path) {
|
||||
fprintf(stderr, "path mem alloc failed\n");
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
break;
|
||||
case '?':
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (path == NULL) {
|
||||
fprintf(stderr, "must specify -f path to file\n");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(&lxh, 0, sizeof(lxh));
|
||||
lxh.id_pos = 0;
|
||||
lxh.hash_pos = 0;
|
||||
@@ -69,11 +44,11 @@ static int listxattr_hidden_cmd(int argc, char **argv)
|
||||
}
|
||||
lxh.buf_ptr = (unsigned long)buf;
|
||||
|
||||
fd = open(path, O_RDONLY);
|
||||
fd = open(args->filename, O_RDONLY);
|
||||
if (fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
path, strerror(errno), errno);
|
||||
args->filename, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -139,9 +114,50 @@ out:
|
||||
return ret;
|
||||
};
|
||||
|
||||
static int parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct list_hidden_xattr_args *args = state->input;
|
||||
|
||||
switch (key) {
|
||||
case ARGP_KEY_ARG:
|
||||
if (args->filename)
|
||||
argp_error(state, "more than one filename argument given");
|
||||
|
||||
args->filename = strdup_or_error(state, arg);
|
||||
break;
|
||||
case ARGP_KEY_FINI:
|
||||
if (!args->filename) {
|
||||
argp_error(state, "must specify filename");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp argp = {
|
||||
NULL,
|
||||
parse_opt,
|
||||
"FILE",
|
||||
"Print the names of hidden xattrs on a file"
|
||||
};
|
||||
|
||||
static int list_hidden_xattrs_cmd(int argc, char **argv)
|
||||
{
|
||||
struct list_hidden_xattr_args list_hidden_xattr_args = {NULL};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&argp, argc, argv, 0, NULL, &list_hidden_xattr_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return do_list_hidden_xattrs(&list_hidden_xattr_args);
|
||||
}
|
||||
|
||||
|
||||
static void __attribute__((constructor)) listxattr_hidden_ctor(void)
|
||||
{
|
||||
cmd_register("listxattr-hidden", "-f <path>",
|
||||
"print the names of hidden xattrs on the file",
|
||||
listxattr_hidden_cmd);
|
||||
cmd_register_argp("list-hidden-xattrs", &argp, GROUP_INFO, list_hidden_xattrs_cmd);
|
||||
}
|
||||
|
||||
@@ -4,10 +4,20 @@
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "cmd.h"
|
||||
#include "util.h"
|
||||
|
||||
/*
|
||||
* Ensure no compiler-added padding sneaks into structs defined in these
|
||||
* headers.
|
||||
*/
|
||||
#pragma GCC diagnostic error "-Wpadded"
|
||||
#include "format.h"
|
||||
#include "ioctl.h"
|
||||
#pragma GCC diagnostic pop
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
/*
|
||||
|
||||
406
utils/src/mkfs.c
406
utils/src/mkfs.c
@@ -11,12 +11,12 @@
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <assert.h>
|
||||
#include <getopt.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <ctype.h>
|
||||
#include <inttypes.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "cmd.h"
|
||||
@@ -30,13 +30,24 @@
|
||||
#include "bitops.h"
|
||||
#include "btree.h"
|
||||
#include "leaf_item_hash.h"
|
||||
#include "blkid.h"
|
||||
|
||||
static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
|
||||
/*
|
||||
* Update the block header fields and write out the block.
|
||||
*/
|
||||
static int write_block(int fd, u32 magic, __le64 fsid, u64 seq, u64 blkno,
|
||||
int shift, struct scoutfs_block_header *hdr)
|
||||
{
|
||||
size_t size = 1ULL << shift;
|
||||
ssize_t ret;
|
||||
|
||||
ret = pwrite(fd, blk, size, blkno << shift);
|
||||
hdr->magic = cpu_to_le32(magic);
|
||||
hdr->fsid = fsid;
|
||||
hdr->blkno = cpu_to_le64(blkno);
|
||||
hdr->seq = cpu_to_le64(seq);
|
||||
hdr->crc = cpu_to_le32(crc_block(hdr, size));
|
||||
|
||||
ret = pwrite(fd, hdr, size, blkno << shift);
|
||||
if (ret != size) {
|
||||
fprintf(stderr, "write to blkno %llu returned %zd: %s (%d)\n",
|
||||
blkno, ret, strerror(errno), errno);
|
||||
@@ -46,35 +57,18 @@ static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the block's header and write it out.
|
||||
*/
|
||||
static int write_block(int fd, u64 blkno, int shift,
|
||||
struct scoutfs_super_block *super,
|
||||
struct scoutfs_block_header *hdr)
|
||||
{
|
||||
size_t size = 1ULL << shift;
|
||||
|
||||
if (super)
|
||||
*hdr = super->hdr;
|
||||
hdr->blkno = cpu_to_le64(blkno);
|
||||
hdr->crc = cpu_to_le32(crc_block(hdr, size));
|
||||
|
||||
return write_raw_block(fd, blkno, shift, hdr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Write the single btree block that contains the blkno and len indexed
|
||||
* items to store the given extent, and update the root to point to it.
|
||||
*/
|
||||
static int write_alloc_root(struct scoutfs_super_block *super, int fd,
|
||||
static int write_alloc_root(int fd, __le64 fsid,
|
||||
struct scoutfs_alloc_root *root,
|
||||
struct scoutfs_btree_block *bt,
|
||||
u64 blkno, u64 start, u64 len)
|
||||
u64 seq, u64 blkno, u64 start, u64 len)
|
||||
{
|
||||
struct scoutfs_key key;
|
||||
|
||||
btree_init_root_single(&root->root, bt, blkno, 1, super->hdr.fsid);
|
||||
btree_init_root_single(&root->root, bt, seq, blkno);
|
||||
root->total_len = cpu_to_le64(len);
|
||||
|
||||
memset(&key, 0, sizeof(key));
|
||||
@@ -93,12 +87,20 @@ static int write_alloc_root(struct scoutfs_super_block *super, int fd,
|
||||
key.skfl_blkno = cpu_to_le64(start);
|
||||
btree_append_item(bt, &key, NULL, 0);
|
||||
|
||||
bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
|
||||
SCOUTFS_BLOCK_LG_SIZE));
|
||||
|
||||
return write_raw_block(fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
|
||||
return write_block(fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, seq, blkno,
|
||||
SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
|
||||
}
|
||||
|
||||
struct mkfs_args {
|
||||
char *meta_device;
|
||||
char *data_device;
|
||||
unsigned long long max_meta_size;
|
||||
unsigned long long max_data_size;
|
||||
bool force;
|
||||
int nr_slots;
|
||||
struct scoutfs_quorum_slot slots[SCOUTFS_QUORUM_MAX_SLOTS];
|
||||
};
|
||||
|
||||
/*
|
||||
* Make a new file system by writing:
|
||||
* - super blocks
|
||||
@@ -108,19 +110,20 @@ static int write_alloc_root(struct scoutfs_super_block *super, int fd,
|
||||
* Superblock is written to both metadata and data devices, everything else is
|
||||
* written only to the metadata device.
|
||||
*/
|
||||
static int write_new_fs(char *meta_path, char *data_path,
|
||||
int meta_fd, int data_fd,
|
||||
u8 quorum_count,
|
||||
u64 max_meta_size, u64 max_data_size)
|
||||
static int do_mkfs(struct mkfs_args *args)
|
||||
{
|
||||
struct scoutfs_super_block *super;
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct scoutfs_inode inode;
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct scoutfs_btree_block *bt = NULL;
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct scoutfs_key key;
|
||||
struct timeval tv;
|
||||
int meta_fd = -1;
|
||||
int data_fd = -1;
|
||||
char uuid_str[37];
|
||||
void *zeros;
|
||||
void *zeros = NULL;
|
||||
char *indent;
|
||||
u64 blkno;
|
||||
u64 meta_size;
|
||||
u64 data_size;
|
||||
@@ -130,10 +133,39 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
u64 last_data;
|
||||
u64 meta_start;
|
||||
u64 meta_len;
|
||||
__le64 fsid;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
gettimeofday(&tv, NULL);
|
||||
pseudo_random_bytes(&fsid, sizeof(fsid));
|
||||
|
||||
meta_fd = open(args->meta_device, O_RDWR | O_EXCL);
|
||||
if (meta_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
args->meta_device, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
if (!args->force) {
|
||||
ret = check_bdev(meta_fd, args->meta_device, "meta");
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
data_fd = open(args->data_device, O_RDWR | O_EXCL);
|
||||
if (data_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
args->data_device, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
if (!args->force) {
|
||||
ret = check_bdev(data_fd, args->data_device, "data");
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
super = calloc(1, SCOUTFS_BLOCK_SM_SIZE);
|
||||
bt = calloc(1, SCOUTFS_BLOCK_LG_SIZE);
|
||||
@@ -145,20 +177,17 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = device_size(meta_path, meta_fd, 2ULL * (1024 * 1024 * 1024),
|
||||
max_meta_size, "meta", &meta_size);
|
||||
ret = device_size(args->meta_device, meta_fd, 2ULL * (1024 * 1024 * 1024),
|
||||
args->max_meta_size, "meta", &meta_size);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = device_size(data_path, data_fd, 8ULL * (1024 * 1024 * 1024),
|
||||
max_data_size, "data", &data_size);
|
||||
ret = device_size(args->data_device, data_fd, 8ULL * (1024 * 1024 * 1024),
|
||||
args->max_data_size, "data", &data_size);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* metadata blocks start after the quorum blocks */
|
||||
next_meta = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
|
||||
SCOUTFS_BLOCK_SM_LG_SHIFT;
|
||||
/* rest of meta dev is available for metadata blocks */
|
||||
next_meta = SCOUTFS_META_DEV_START_BLKNO;
|
||||
last_meta = (meta_size >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
|
||||
/* Data blocks go on the data dev */
|
||||
first_data = SCOUTFS_DATA_DEV_START_BLKNO;
|
||||
@@ -166,10 +195,7 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
|
||||
/* partially initialize the super so we can use it to init others */
|
||||
memset(super, 0, SCOUTFS_BLOCK_SM_SIZE);
|
||||
pseudo_random_bytes(&super->hdr.fsid, sizeof(super->hdr.fsid));
|
||||
super->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SUPER);
|
||||
super->hdr.seq = cpu_to_le64(1);
|
||||
super->format_hash = cpu_to_le64(SCOUTFS_FORMAT_HASH);
|
||||
super->version = cpu_to_le64(SCOUTFS_INTEROP_VERSION);
|
||||
uuid_generate(super->uuid);
|
||||
super->next_ino = cpu_to_le64(SCOUTFS_ROOT_INO + 1);
|
||||
super->next_trans_seq = cpu_to_le64(1);
|
||||
@@ -179,11 +205,14 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
super->total_data_blocks = cpu_to_le64(last_data - first_data + 1);
|
||||
super->first_data_blkno = cpu_to_le64(first_data);
|
||||
super->last_data_blkno = cpu_to_le64(last_data);
|
||||
super->quorum_count = quorum_count;
|
||||
|
||||
assert(sizeof(args->slots) ==
|
||||
member_sizeof(struct scoutfs_super_block, qconf.slots));
|
||||
memcpy(super->qconf.slots, args->slots, sizeof(args->slots));
|
||||
|
||||
/* fs root starts with root inode and its index items */
|
||||
blkno = next_meta++;
|
||||
btree_init_root_single(&super->fs_root, bt, blkno, 1, super->hdr.fsid);
|
||||
btree_init_root_single(&super->fs_root, bt, 1, blkno);
|
||||
|
||||
memset(&key, 0, sizeof(key));
|
||||
key.sk_zone = SCOUTFS_INODE_INDEX_ZONE;
|
||||
@@ -208,10 +237,8 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
inode.mtime.nsec = inode.atime.nsec;
|
||||
btree_append_item(bt, &key, &inode, sizeof(inode));
|
||||
|
||||
bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
|
||||
SCOUTFS_BLOCK_LG_SIZE));
|
||||
|
||||
ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
|
||||
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, 1, blkno,
|
||||
SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -220,11 +247,6 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
lblk = (void *)bt;
|
||||
memset(lblk, 0, SCOUTFS_BLOCK_LG_SIZE);
|
||||
|
||||
lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
|
||||
lblk->hdr.fsid = super->hdr.fsid;
|
||||
lblk->hdr.blkno = cpu_to_le64(blkno);
|
||||
lblk->hdr.seq = cpu_to_le64(1);
|
||||
|
||||
meta_len = (64 * 1024 * 1024) >> SCOUTFS_BLOCK_LG_SHIFT;
|
||||
for (i = 0; i < meta_len; i++) {
|
||||
lblk->blknos[i] = cpu_to_le64(next_meta);
|
||||
@@ -232,20 +254,20 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
}
|
||||
lblk->nr = cpu_to_le32(i);
|
||||
|
||||
super->server_meta_avail[0].ref.blkno = lblk->hdr.blkno;
|
||||
super->server_meta_avail[0].ref.seq = lblk->hdr.seq;
|
||||
super->server_meta_avail[0].ref.blkno = cpu_to_le64(blkno);
|
||||
super->server_meta_avail[0].ref.seq = cpu_to_le64(1);
|
||||
super->server_meta_avail[0].total_nr = le32_to_le64(lblk->nr);
|
||||
super->server_meta_avail[0].first_nr = lblk->nr;
|
||||
|
||||
lblk->hdr.crc = cpu_to_le32(crc_block(&bt->hdr, SCOUTFS_BLOCK_LG_SIZE));
|
||||
ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, lblk);
|
||||
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, fsid, 1,
|
||||
blkno, SCOUTFS_BLOCK_LG_SHIFT, &lblk->hdr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* the data allocator has a single extent */
|
||||
blkno = next_meta++;
|
||||
ret = write_alloc_root(super, meta_fd, &super->data_alloc, bt,
|
||||
blkno, first_data,
|
||||
ret = write_alloc_root(meta_fd, fsid, &super->data_alloc, bt,
|
||||
1, blkno, first_data,
|
||||
le64_to_cpu(super->total_data_blocks));
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -262,8 +284,8 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
/* each meta alloc root contains a portion of free metadata extents */
|
||||
for (i = 0; i < array_size(super->meta_alloc); i++) {
|
||||
blkno = next_meta++;
|
||||
ret = write_alloc_root(super, meta_fd, &super->meta_alloc[i], bt,
|
||||
blkno, meta_start,
|
||||
ret = write_alloc_root(meta_fd, fsid, &super->meta_alloc[i], bt,
|
||||
1, blkno, meta_start,
|
||||
min(meta_len,
|
||||
last_meta - meta_start + 1));
|
||||
if (ret < 0)
|
||||
@@ -273,9 +295,11 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
}
|
||||
|
||||
/* zero out quorum blocks */
|
||||
hdr = zeros;
|
||||
for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
|
||||
ret = write_raw_block(meta_fd, SCOUTFS_QUORUM_BLKNO + i,
|
||||
SCOUTFS_BLOCK_SM_SHIFT, zeros);
|
||||
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_QUORUM, fsid,
|
||||
1, SCOUTFS_QUORUM_BLKNO + i,
|
||||
SCOUTFS_BLOCK_SM_SHIFT, hdr);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "error zeroing quorum block: %s (%d)\n",
|
||||
strerror(-errno), -errno);
|
||||
@@ -284,29 +308,30 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
}
|
||||
|
||||
/* write the super block to data dev and meta dev*/
|
||||
super->hdr.seq = cpu_to_le64(1);
|
||||
ret = write_block(data_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
|
||||
NULL, &super->hdr);
|
||||
ret = write_block(data_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid, 1,
|
||||
SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
|
||||
&super->hdr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (fsync(data_fd)) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to fsync '%s': %s (%d)\n",
|
||||
data_path, strerror(errno), errno);
|
||||
args->data_device, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
super->flags |= cpu_to_le64(SCOUTFS_FLAG_IS_META_BDEV);
|
||||
ret = write_block(meta_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
|
||||
NULL, &super->hdr);
|
||||
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid,
|
||||
1, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
|
||||
&super->hdr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (fsync(meta_fd)) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to fsync '%s': %s (%d)\n",
|
||||
meta_path, strerror(errno), errno);
|
||||
args->meta_device, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -316,21 +341,35 @@ static int write_new_fs(char *meta_path, char *data_path,
|
||||
" meta device path: %s\n"
|
||||
" data device path: %s\n"
|
||||
" fsid: %llx\n"
|
||||
" format hash: %llx\n"
|
||||
" version: %llx\n"
|
||||
" uuid: %s\n"
|
||||
" 64KB metadata blocks: "SIZE_FMT"\n"
|
||||
" 4KB data blocks: "SIZE_FMT"\n"
|
||||
" quorum count: %u\n",
|
||||
meta_path,
|
||||
data_path,
|
||||
" quorum slots: ",
|
||||
args->meta_device,
|
||||
args->data_device,
|
||||
le64_to_cpu(super->hdr.fsid),
|
||||
le64_to_cpu(super->format_hash),
|
||||
le64_to_cpu(super->version),
|
||||
uuid_str,
|
||||
SIZE_ARGS(le64_to_cpu(super->total_meta_blocks),
|
||||
SCOUTFS_BLOCK_LG_SIZE),
|
||||
SIZE_ARGS(le64_to_cpu(super->total_data_blocks),
|
||||
SCOUTFS_BLOCK_SM_SIZE),
|
||||
super->quorum_count);
|
||||
SCOUTFS_BLOCK_SM_SIZE));
|
||||
|
||||
indent = "";
|
||||
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
|
||||
struct scoutfs_quorum_slot *sl = &super->qconf.slots[i];
|
||||
struct in_addr in;
|
||||
|
||||
if (sl->addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
|
||||
continue;
|
||||
|
||||
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
|
||||
printf("%s%u: %s:%u", indent,
|
||||
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
|
||||
indent = "\n ";
|
||||
}
|
||||
printf("\n");
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
@@ -340,102 +379,153 @@ out:
|
||||
free(bt);
|
||||
if (zeros)
|
||||
free(zeros);
|
||||
if (meta_fd != -1)
|
||||
close(meta_fd);
|
||||
if (data_fd != -1)
|
||||
close(data_fd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct option long_ops[] = {
|
||||
{ "quorum_count", 1, NULL, 'Q' },
|
||||
{ NULL, 0, NULL, 0}
|
||||
};
|
||||
|
||||
static int mkfs_func(int argc, char *argv[])
|
||||
static bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
|
||||
{
|
||||
unsigned long long ull;
|
||||
u8 quorum_count = 0;
|
||||
u64 max_data_size = 0;
|
||||
u64 max_meta_size = 0;
|
||||
char *end = NULL;
|
||||
char *meta_path;
|
||||
char *data_path;
|
||||
int meta_fd;
|
||||
int data_fd;
|
||||
int ret;
|
||||
int c;
|
||||
struct in_addr in;
|
||||
bool valid = true;
|
||||
char *addr;
|
||||
int i;
|
||||
int j;
|
||||
|
||||
while ((c = getopt_long(argc, argv, "Q:D:M:", long_ops, NULL)) != -1) {
|
||||
switch (c) {
|
||||
case 'Q':
|
||||
ull = strtoull(optarg, &end, 0);
|
||||
if (*end != '\0' || ull == 0 ||
|
||||
ull > SCOUTFS_QUORUM_MAX_COUNT) {
|
||||
printf("scoutfs: invalid quorum count '%s'\n",
|
||||
optarg);
|
||||
return -EINVAL;
|
||||
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
|
||||
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_NONE))
|
||||
continue;
|
||||
|
||||
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
fprintf(stderr, "quorum slot nr %u has invalid family %u\n",
|
||||
i, le16_to_cpu(slots[i].addr.v4.family));
|
||||
valid = false;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
|
||||
continue;
|
||||
|
||||
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
|
||||
slots[i].addr.v4.port == slots[j].addr.v4.port) {
|
||||
|
||||
in.s_addr =
|
||||
htonl(le32_to_cpu(slots[i].addr.v4.addr));
|
||||
addr = inet_ntoa(in);
|
||||
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
|
||||
i, j, addr,
|
||||
le16_to_cpu(slots[i].addr.v4.port));
|
||||
valid = false;
|
||||
}
|
||||
quorum_count = ull;
|
||||
break;
|
||||
case 'D':
|
||||
ret = parse_human(optarg, &max_data_size);
|
||||
if (ret < 0) {
|
||||
printf("scoutfs: invalid data device size '%s'\n",
|
||||
optarg);
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
case 'M':
|
||||
ret = parse_human(optarg, &max_meta_size);
|
||||
if (ret < 0) {
|
||||
printf("scoutfs: invalid meta device size '%s'\n",
|
||||
optarg);
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
case '?':
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (optind + 2 != argc) {
|
||||
printf("scoutfs: mkfs: paths to metadata and data devices are required\n");
|
||||
return -EINVAL;
|
||||
return valid;
|
||||
}
|
||||
|
||||
static int parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct mkfs_args *args = state->input;
|
||||
struct scoutfs_quorum_slot slot;
|
||||
int ret;
|
||||
|
||||
switch (key) {
|
||||
case 'Q':
|
||||
ret = parse_quorum_slot(&slot, arg);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (args->slots[ret].addr.v4.family != cpu_to_le16(SCOUTFS_AF_NONE))
|
||||
argp_error(state, "Quorum slot %u already specified before slot '%s'\n",
|
||||
ret, arg);
|
||||
args->slots[ret] = slot;
|
||||
args->nr_slots++;
|
||||
break;
|
||||
case 'f':
|
||||
args->force = true;
|
||||
break;
|
||||
case 'm': /* max-meta-size */
|
||||
{
|
||||
u64 prev_val;
|
||||
ret = parse_human(arg, &args->max_meta_size);
|
||||
if (ret)
|
||||
return ret;
|
||||
prev_val = args->max_meta_size;
|
||||
args->max_meta_size = round_down(args->max_meta_size, SCOUTFS_BLOCK_LG_SIZE);
|
||||
if (args->max_meta_size != prev_val)
|
||||
fprintf(stderr, "Meta dev size %llu rounded down to %llu bytes\n",
|
||||
prev_val, args->max_meta_size);
|
||||
break;
|
||||
}
|
||||
case 'd': /* max-data-size */
|
||||
{
|
||||
u64 prev_val;
|
||||
ret = parse_human(arg, &args->max_data_size);
|
||||
if (ret)
|
||||
return ret;
|
||||
prev_val = args->max_data_size;
|
||||
args->max_data_size = round_down(args->max_data_size, SCOUTFS_BLOCK_SM_SIZE);
|
||||
if (args->max_data_size != prev_val)
|
||||
fprintf(stderr, "Data dev size %llu rounded down to %llu bytes\n",
|
||||
prev_val, args->max_data_size);
|
||||
break;
|
||||
}
|
||||
case ARGP_KEY_ARG:
|
||||
if (!args->meta_device)
|
||||
args->meta_device = strdup_or_error(state, arg);
|
||||
else if (!args->data_device)
|
||||
args->data_device = strdup_or_error(state, arg);
|
||||
else
|
||||
argp_error(state, "more than two arguments given");
|
||||
break;
|
||||
case ARGP_KEY_FINI:
|
||||
if (!args->nr_slots)
|
||||
argp_error(state, "must specify at least one quorum slot with --quorum-count|-Q");
|
||||
if (!args->meta_device)
|
||||
argp_error(state, "no metadata device argument given");
|
||||
if (!args->data_device)
|
||||
argp_error(state, "no data device argument given");
|
||||
if (!valid_quorum_slots(args->slots))
|
||||
argp_error(state, "invalid quorum slot configuration");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
meta_path = argv[optind];
|
||||
data_path = argv[optind + 1];
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!quorum_count) {
|
||||
printf("provide quorum count with --quorum_count|-Q option\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
static struct argp_option options[] = {
|
||||
{ "quorum-slot", 'Q', "NR,ADDR,PORT", 0, "Specify quorum slot addresses [Required]"},
|
||||
{ "force", 'f', NULL, 0, "Overwrite existing data on block devices"},
|
||||
{ "max-meta-size", 'm', "SIZE", 0, "Use a size less than the base metadata device size (bytes or KMGTP units)"},
|
||||
{ "max-data-size", 'd', "SIZE", 0, "Use a size less than the base data device size (bytes or KMGTP units)"},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
meta_fd = open(meta_path, O_RDWR | O_EXCL);
|
||||
if (meta_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open metadata device '%s': %s (%d)\n",
|
||||
meta_path, strerror(errno), errno);
|
||||
static struct argp argp = {
|
||||
options,
|
||||
parse_opt,
|
||||
"META-DEVICE DATA-DEVICE",
|
||||
"Initialize a new ScoutFS filesystem"
|
||||
};
|
||||
|
||||
static int mkfs_cmd(int argc, char *argv[])
|
||||
{
|
||||
struct mkfs_args mkfs_args = {NULL,};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&argp, argc, argv, 0, NULL, &mkfs_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
data_fd = open(data_path, O_RDWR | O_EXCL);
|
||||
if (data_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open data device '%s': %s (%d)\n",
|
||||
data_path, strerror(errno), errno);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = write_new_fs(meta_path, data_path, meta_fd, data_fd,
|
||||
quorum_count, max_meta_size, max_data_size);
|
||||
close(meta_fd);
|
||||
close(data_fd);
|
||||
|
||||
return ret;
|
||||
return do_mkfs(&mkfs_args);
|
||||
}
|
||||
|
||||
static void __attribute__((constructor)) mkfs_ctor(void)
|
||||
{
|
||||
cmd_register("mkfs", "<path>", "write a new file system", mkfs_func);
|
||||
cmd_register_argp("mkfs", &argp, GROUP_CORE, mkfs_cmd);
|
||||
|
||||
/* for lack of some other place to put these.. */
|
||||
build_assert(sizeof(uuid_t) == SCOUTFS_UUID_BYTES);
|
||||
|
||||
161
utils/src/move_blocks.c
Normal file
161
utils/src/move_blocks.c
Normal file
@@ -0,0 +1,161 @@
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <getopt.h>
|
||||
#include <assert.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "util.h"
|
||||
#include "format.h"
|
||||
#include "ioctl.h"
|
||||
#include "cmd.h"
|
||||
#include "parse.h"
|
||||
|
||||
struct move_blocks_args {
|
||||
char *from_path;
|
||||
u64 from_offset;
|
||||
u64 length;
|
||||
char *to_path;
|
||||
u64 to_offset;
|
||||
|
||||
unsigned from_off_set:1,
|
||||
len_set:1,
|
||||
to_off_set:1;
|
||||
};
|
||||
|
||||
static int do_move_blocks(struct move_blocks_args *args)
|
||||
{
|
||||
struct scoutfs_ioctl_move_blocks mb = {0};
|
||||
int from_fd = -1;
|
||||
int to_fd = -1;
|
||||
int ret;
|
||||
|
||||
from_fd = open(args->from_path, O_RDWR);
|
||||
if (from_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
args->from_path, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
to_fd = open(args->to_path, O_RDWR);
|
||||
if (to_fd < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "failed to open '%s': %s (%d)\n",
|
||||
args->to_path, strerror(errno), errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mb.from_fd = from_fd;
|
||||
mb.from_off = args->from_offset;
|
||||
mb.len = args->length;
|
||||
mb.to_off = args->to_offset;
|
||||
|
||||
ret = ioctl(to_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
|
||||
if (ret < 0) {
|
||||
ret = -errno;
|
||||
fprintf(stderr, "ioctl failed on '%s': %s (%d)\n",
|
||||
args->to_path, strerror(errno), errno);
|
||||
}
|
||||
|
||||
out:
|
||||
if (from_fd >= 0)
|
||||
close(from_fd);
|
||||
if (to_fd >= 0)
|
||||
close(to_fd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int parse_move_blocks_opts(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
struct move_blocks_args *args = state->input;
|
||||
int ret;
|
||||
|
||||
switch (key) {
|
||||
case 'f':
|
||||
ret = parse_u64(arg, &args->from_offset);
|
||||
if (ret)
|
||||
return ret;
|
||||
args->from_off_set = 1;
|
||||
break;
|
||||
case 'l':
|
||||
ret = parse_human(arg, &args->length);
|
||||
if (ret)
|
||||
return ret;
|
||||
args->len_set = 1;
|
||||
break;
|
||||
case 't':
|
||||
ret = parse_human(arg, &args->to_offset);
|
||||
if (ret)
|
||||
return ret;
|
||||
args->to_off_set = 1;
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (args->to_path)
|
||||
argp_error(state, "more than two file path arguments given");
|
||||
if (args->from_path)
|
||||
args->to_path = strdup_or_error(state, arg);
|
||||
else
|
||||
args->from_path = strdup_or_error(state, arg);
|
||||
break;
|
||||
case ARGP_KEY_FINI:
|
||||
if (!args->from_path)
|
||||
argp_error(state, "must provide from file path");
|
||||
if (!args->to_path)
|
||||
argp_error(state, "must provide to file path");
|
||||
if (!args->from_off_set)
|
||||
argp_error(state, "must provide from file offset --from-offset");
|
||||
if (!args->len_set)
|
||||
argp_error(state, "must provide region length --length");
|
||||
if (!args->to_off_set)
|
||||
argp_error(state, "must provide to file offset --to-offset");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp_option move_blocks_options[] = {
|
||||
{ "from-offset", 'f', "OFFSET", 0,
|
||||
"Byte offset in from file of region to move [Required]"},
|
||||
{ "length", 'l', "LENGTH", 0,
|
||||
"Length in bytes of region to move between files [Required]"},
|
||||
{ "to-offset", 't', "OFFSET", 0,
|
||||
"Byte offset in to file where region will be moved to [Required]"},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static struct argp move_blocks_argp = {
|
||||
move_blocks_options,
|
||||
parse_move_blocks_opts,
|
||||
"FROM_FILE --from-offset OFFSET --length LENGTH TO_FILE --to-offset OFFSET",
|
||||
"Move a fixed-size region of extents from one regular file to another",
|
||||
};
|
||||
|
||||
static int move_blocks_cmd(int argc, char **argv)
|
||||
{
|
||||
struct move_blocks_args args = {NULL};
|
||||
int ret;
|
||||
|
||||
ret = argp_parse(&move_blocks_argp, argc, argv, 0, NULL, &args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return do_move_blocks(&args);
|
||||
}
|
||||
|
||||
static void __attribute__((constructor)) move_blocks_ctor(void)
|
||||
{
|
||||
cmd_register_argp("move-blocks", &move_blocks_argp, GROUP_AGENT,
|
||||
move_blocks_cmd);
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user