mirror of https://github.com/versity/scoutfs.git
synced 2026-01-11 06:00:19 +00:00

Compare commits: zab/lock_s...v1.1 (212 commits)
SHA1:

2634fadfcb
0c1f19556d
19caae3da8
2989afbf46
730a84af92
5b77133c3b
329ac0347d
15d7eec1f9
cff17a4cae
9fa2c6af89
e067961714
7a96e03148
e9b3cc873a
5f2259c48f
e14912974d
813ce24d79
e2ce5ab6da
89ca903c41
e3c7e21c40
e97ea5407d
8db5c118c3
61ad844891
2c8f5d8fc1
8a504cd5ae
99a1cc704f
166ab58b99
8bc1ee8346
285b68879a
1ac3efe701
ce76682db7
686f8515bc
93bc52cc54
1108d1288a
0abcd5a004
888ad8ec5c
16ea0ef671
1b8e3f7c05
3ae0ebd0d8
714b7f2a84
945f8b4828
b5ccefeeb9
ea08942824
95f2a87864
38ee2defd5
0fc8ccb122
e4a3c2b95d
cf4e6611d3
65429a9cc4
d764ed7c43
465e5ee769
83a6bbb640
f02d68f567
5d6a510e25
1b4d291bf7
223ee5deef
8f60ac06c5
932a842ae3
618a7a4c47
9ebf43db99
e38beee85a
20ac2e35fa
80ee2c6d57
42c4c6dd24
7d71b610af
70ede28e39
b477604339
75f9aabe75
cf512c5fcf
a53d6d1a8e
95ed36f9d3
94e5bc1457
366f615c9f
ac2587017e
1cdcf41ac7
024426df28
a0690070ae
4e00f95014
0c95388f3b
d255dd3b32
9b4ac64312
22f9ab4dab
501953d69e
66b8c5fbd7
3c6c2194bd
6ca8c0eec2
ea2b01434e
d5eec7d001
ab92d8d251
b9a0f1709f
a59fd5865d
46edf82b6b
e9078d83bf
79fbaa6481
9b9d3cf6fc
ad5662b892
f5577e26b1
5f57785790
2a33b9faf0
3740c0a995
a4f5293e78
0c3026a2b7
5bc95fac7d
36fcc4665d
b0a08eb922
bb571377dc
5897f4d889
999093bfc9
05b5d93365
4d7191dc48
4495dbdce6
70569b0448
823838cf01
89b5865a4c
7cf9cd8c20
65ac42831f
dde6dab0a1
cb1726681c
cdff272163
7e935898ab
6d0694f1b0
fd686cab86
4c1181c055
d6bed7181f
4893a6f915
384590f016
192f077c16
a9baeab22e
b7ab26539a
c51f0c37da
52107424dd
099a65ab07
21c5724dd5
3974d98f6b
2901b43906
03d7a4e7fe
d5d3b12986
e4dca8ddcc
011b7d52e5
3a9db45194
53f11f5479
b4ede2ac6a
cbe8d77f78
5f682dabb5
120c2d342a
84454b38c5
29cfa81574
73bf916182
9db3b475c0
24d682bf81
2957f3e301
07210b5734
0374661a92
28759f3269
5c3fdb48af
a7828a6410
a1d46e1a92
d67db6662b
c5c050bef0
96d286d6e5
9febc6b5dc
045b3ca8d4
ff882a4c4f
3d1a0f06c0
3488b4e6e0
c482204fcf
9711fef122
91acf92666
9c2122f7de
4d3ea3b59b
298a6a8865
082924df1a
d8478ed6f1
0538c882bc
3a03a6a20c
b6d0a45f6d
d7f8896fac
65c39e5f97
3c69861c03
05ae756b74
9051ceb6fc
bad1c602f9
cee6ad34d3
38a4a56741
76076011a2
bdc0282fa7
1199bac91d
1e460e5cb0
877e30d60f
a972e42fba
0706669047
76cef6fdfc
aad2d3db59
933fc687c3
6663034295
ab5466a771
f3764b873b
9ebc9d0f66
8b78f701a1
1f1f40f079
943351944a
b060eb4f5d
2dde729791
ccb7c0bf4b
e9d04dcf8d
5dceac32db
ef440ead28
d0b04e790c
54644a5074
52c2a465db
bc4975fad4
9de3ae6dcb
0aa6005c99
README.md | 133

@@ -1,135 +1,24 @@
# Introduction

scoutfs is a clustered in-kernel Linux filesystem designed and built
from the ground up to support large archival systems.
scoutfs is a clustered in-kernel Linux filesystem designed to support
large archival systems. It features additional interfaces and metadata
so that archive agents can perform their maintenance workflows without
walking all the files in the namespace. Its cluster support lets
deployments add nodes to satisfy archival tier bandwidth targets.

Its key differentiating features are:
The design goal is to reach file populations in the trillions, with the
archival bandwidth to match, while remaining operational and responsive.

- Integrated consistent indexing accelerates archival maintenance operations
- Commit logs allow nodes to write concurrently without contention

It meets best of breed expectations:
Highlights of the design and implementation include:

* Fully consistent POSIX semantics between nodes
* Rich metadata to ensure the integrity of metadata references
* Atomic transactions to maintain consistent persistent structures
* First class kernel implementation for high performance and low latency
* Integrated archival metadata replaces syncing to external databases
* Dynamic separation of resources lets nodes write in parallel
* 64bit throughout; no limits on file or directory sizes or counts
* Open GPLv2 implementation

Learn more in the [white paper](https://docs.wixstatic.com/ugd/aaa89b_88a5cc84be0b4d1a90f60d8900834d28.pdf).

# Current Status

**Alpha Open Source Development**

scoutfs is under heavy active development. We're developing it in the
open to give the community an opportunity to affect the design and
implementation.

The core architectural design elements are in place. Much surrounding
functionality hasn't been implemented. It's appropriate for early
adopters and interested developers, not for production use.

In that vein, expect significant incompatible changes to both the format
of network messages and persistent structures. Since the format hash-checking
has now been removed in preparation for release, if there is any doubt, mkfs
is strongly recommended.

The current kernel module is developed against the RHEL/CentOS 7.x
kernel to minimize the friction of developing and testing with partners'
existing infrastructure. Once we're happy with the design we'll shift
development to the upstream kernel while maintaining distro
compatibility branches.

# Community Mailing List

Please join us on the open scoutfs-devel@scoutfs.org [mailing list
hosted on Google Groups](https://groups.google.com/a/scoutfs.org/forum/#!forum/scoutfs-devel)
for all discussion of scoutfs.

# Quick Start

**The following is a very rough example of the procedure to get up and
running; experience will be needed to fill in the gaps. We're happy to
help on the mailing list.**

The requirements for running scoutfs on a small cluster are:

1. One or more nodes running x86-64 CentOS/RHEL 7.4 (or 7.3)
2. Access to two shared block devices
3. IPv4 connectivity between the nodes

The steps for getting scoutfs mounted and operational are:

1. Get the kernel module running on the nodes
2. Make a new filesystem on the devices with the userspace utilities
3. Mount the devices on all the nodes

In this example we use three nodes. The names of the block devices are
the same on all the nodes. Two of the nodes will be quorum members. A
majority of quorum members must be mounted to elect a leader to run a
server that all the mounts connect to. It should be noted that two
quorum members result in a majority of one, each member itself, so
split-brain elections are possible but so unlikely that it's fine for a
demonstration.

1. Get the Kernel Module and Userspace Binaries

* Either use snapshot RPMs built from git by Versity:

```shell
rpm -i https://scoutfs.s3-us-west-2.amazonaws.com/scoutfs-repo-0.0.1-1.el7_4.noarch.rpm
yum install scoutfs-utils kmod-scoutfs
```

* Or use the binaries built from checked out git repositories:

```shell
yum install kernel-devel
git clone git@github.com:versity/scoutfs.git
make -C scoutfs
modprobe libcrc32c
insmod scoutfs/kmod/src/scoutfs.ko
alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
```

2. Make a New Filesystem (**destroys contents**)

We specify quorum slots with the addresses of each of the quorum
member nodes, the metadata device, and the data device.

```shell
scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
```

3. Mount the Filesystem

First, mount each of the quorum nodes so that they can elect and
start a server for the remaining node to connect to. The slot numbers
were specified with the leading "0,..." and "1,..." in the mkfs options
above.

```shell
mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

Then mount the remaining node, which can now connect to the running server.

```shell
mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

4. For Kicks, Observe the Metadata Change Index

The `meta_seq` index tracks the inodes that are changed in each
transaction.

```shell
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/two; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
```
ReleaseNotes.md | 39 (new file)

@@ -0,0 +1,39 @@
Versity ScoutFS Release Notes
=============================

---
v1.2-rc
\
*TBD*

---
v1.1
\
*Feb 4, 2022*


* **Add scoutfs(1) change-quorum-config command**
\
Add a change-quorum-config command to scoutfs(1) to change the quorum
configuration stored in the metadata device while the file system is
unmounted. This can be used to change the mounts that will
participate in quorum and the IP addresses they use.

* **Fix Rare Risk of Item Cache Corruption**
\
Code review found a rare potential source of item cache corruption.
If this happened it would look as though deleted parts of the filesystem
returned, but only at the time they were deleted. Old deleted items are
not affected. This problem only affected the item cache, never
persistent storage. Unmounting and remounting would drop the bad item
cache and resync it with the correct persistent data.

---
v1.0
\
*Nov 8, 2021*


* **Initial Release**
\
Version 1.0 marks the first GA release.
@@ -18,6 +18,7 @@ scoutfs-y += \
	dir.o \
	export.o \
	ext.o \
	fence.o \
	file.o \
	forest.o \
	inode.o \
@@ -42,6 +43,7 @@ scoutfs-y += \
	trans.o \
	triggers.o \
	tseq.o \
	volopt.o \
	xattr.o

#
kmod/src/alloc.c | 561

@@ -29,8 +29,8 @@
 * The core allocator uses extent items in btrees rooted in the super.
 * Each free extent is stored in two items. The first item is indexed
 * by block location and is used to merge adjacent extents when freeing.
 * The second item is indexed by length and is used to find large
 * extents to allocate from.
 * The second item is indexed by the order of the length and is used to
 * find large extents to allocate from.
 *
 * Free extent always consumes the front of the largest extent. This
 * attempts to discourage fragmentation by giving smaller freed extents
@@ -67,25 +67,52 @@
 */

/*
 * Free extents don't have flags and are stored in two indexes sorted by
 * block location and by length, largest first. The block location key
 * is set to the final block in the extent so that we can find
 * intersections by calling _next() iterators starting with the block
 * we're searching for.
 * Return the order of the length of a free extent, which we define as
 * floor(log_8_(len)): 0..7 = 0, 8..63 = 1, etc.
 */
static void init_ext_key(struct scoutfs_key *key, int type, u64 start, u64 len)
static u64 free_extent_order(u64 len)
{
	return (fls64(len | 1) - 1) / 3;
}

/*
 * The smallest (non-zero) length that will be mapped to the same order
 * as the given length.
 */
static u64 smallest_order_length(u64 len)
{
	return 1ULL << (free_extent_order(len) * 3);
}
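
As a quick sanity check of the order mapping described above, here is a standalone userspace sketch (not scoutfs code; fls64_demo() stands in for the kernel's fls64(), which returns the 1-based index of the highest set bit):

```c
#include <stdio.h>

/* stand-in for the kernel's fls64(): 1-based index of the highest set bit */
static unsigned fls64_demo(unsigned long long x)
{
	unsigned r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long long lens[] = { 1, 7, 8, 63, 64, 511, 512 };
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned long long len = lens[i];
		unsigned order = (fls64_demo(len | 1) - 1) / 3;

		/* prints order 0 for 1..7, 1 for 8..63, 2 for 64..511, 3 for 512 */
		printf("len %llu -> order %u, smallest len of that order %llu\n",
		       len, order, 1ULL << (order * 3));
	}
	return 0;
}
```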

/*
 * Free extents don't have flags and are stored in two indexes sorted by
 * block location and by length order, largest first. The location key
 * field is set to the final block in the extent so that we can find
 * intersections by calling _next() with the start of the range we're
 * searching for.
 *
 * We never store 0 length extents but we do build keys for searching
 * the order index from 0,0 without having to map it to a real extent.
 */
static void init_ext_key(struct scoutfs_key *key, int zone, u64 start, u64 len)
{
	*key = (struct scoutfs_key) {
		.sk_zone = SCOUTFS_FREE_EXTENT_ZONE,
		.sk_type = type,
		.sk_zone = zone,
	};

	if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
	if (len == 0) {
		/* we only use 0 len extents for magic 0,0 order lookups */
		WARN_ON_ONCE(zone != SCOUTFS_FREE_EXTENT_ORDER_ZONE || start != 0);
		return;
	}

	if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
		key->skfb_end = cpu_to_le64(start + len - 1);
		key->skfb_len = cpu_to_le64(len);
	} else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE) {
		key->skfl_neglen = cpu_to_le64(-len);
		key->skfl_blkno = cpu_to_le64(start);
	} else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE) {
		key->skfo_revord = cpu_to_le64(U64_MAX - free_extent_order(len));
		key->skfo_end = cpu_to_le64(start + len - 1);
		key->skfo_len = cpu_to_le64(len);
	} else {
		BUG();
	}
@@ -93,23 +120,27 @@ static void init_ext_key(struct scoutfs_key *key, int type, u64 start, u64 len)

static void ext_from_key(struct scoutfs_extent *ext, struct scoutfs_key *key)
{
	if (key->sk_type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
	if (key->sk_zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
		ext->start = le64_to_cpu(key->skfb_end) -
			     le64_to_cpu(key->skfb_len) + 1;
		ext->len = le64_to_cpu(key->skfb_len);
	} else {
		ext->start = le64_to_cpu(key->skfl_blkno);
		ext->len = -le64_to_cpu(key->skfl_neglen);
		ext->start = le64_to_cpu(key->skfo_end) -
			     le64_to_cpu(key->skfo_len) + 1;
		ext->len = le64_to_cpu(key->skfo_len);
	}
	ext->map = 0;
	ext->flags = 0;

	/* we never store 0 length extents */
	WARN_ON_ONCE(ext->len == 0);
}

struct alloc_ext_args {
	struct scoutfs_alloc *alloc;
	struct scoutfs_block_writer *wri;
	struct scoutfs_alloc_root *root;
	int type;
	int zone;
};

static int alloc_ext_next(struct super_block *sb, void *arg,
@@ -120,13 +151,13 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
	struct scoutfs_key key;
	int ret;

	init_ext_key(&key, args->type, start, len);
	init_ext_key(&key, args->zone, start, len);

	ret = scoutfs_btree_next(sb, &args->root->root, &key, &iref);
	if (ret == 0) {
		if (iref.val_len != 0)
			ret = -EIO;
		else if (iref.key->sk_type != args->type)
		else if (iref.key->sk_zone != args->zone)
			ret = -ENOENT;
		else
			ext_from_key(ext, iref.key);
@@ -139,19 +170,19 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
	return ret;
}

static int other_type(int type)
static int other_zone(int zone)
{
	if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE)
		return SCOUTFS_FREE_EXTENT_LEN_TYPE;
	else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE)
		return SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
	if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
		return SCOUTFS_FREE_EXTENT_ORDER_ZONE;
	else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE)
		return SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
	else
		BUG();
}

/*
 * Insert an extent along with its matching item which is indexed by
 * opposite of its len or blkno. If we succeed we update the root's
 * opposite of its order or blkno. If we succeed we update the root's
 * record of the total length of all the stored extents.
 */
static int alloc_ext_insert(struct super_block *sb, void *arg,
@@ -167,8 +198,8 @@ static int alloc_ext_insert(struct super_block *sb, void *arg,
	if (WARN_ON_ONCE(map || flags))
		return -EINVAL;

	init_ext_key(&key, args->type, start, len);
	init_ext_key(&other, other_type(args->type), start, len);
	init_ext_key(&key, args->zone, start, len);
	init_ext_key(&other, other_zone(args->zone), start, len);

	ret = scoutfs_btree_insert(sb, args->alloc, args->wri,
				   &args->root->root, &key, NULL, 0);
@@ -196,8 +227,8 @@ static int alloc_ext_remove(struct super_block *sb, void *arg,
	int ret;
	int err;

	init_ext_key(&key, args->type, start, len);
	init_ext_key(&other, other_type(args->type), start, len);
	init_ext_key(&key, args->zone, start, len);
	init_ext_key(&other, other_zone(args->zone), start, len);

	ret = scoutfs_btree_delete(sb, args->alloc, args->wri,
				   &args->root->root, &key);
@@ -221,6 +252,7 @@ static struct scoutfs_ext_ops alloc_ext_ops = {
	.next = alloc_ext_next,
	.insert = alloc_ext_insert,
	.remove = alloc_ext_remove,
	.insert_overlap_warn = true,
};

static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)
@@ -230,20 +262,17 @@ static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)

static bool invalid_meta_blkno(struct super_block *sb, u64 blkno)
{
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	u64 last_meta = (i_size_read(sbi->meta_bdev->bd_inode) >> SCOUTFS_BLOCK_LG_SHIFT) - 1;

	return invalid_extent(blkno, blkno,
			      le64_to_cpu(super->first_meta_blkno),
			      le64_to_cpu(super->last_meta_blkno));
	return invalid_extent(blkno, blkno, SCOUTFS_META_DEV_START_BLKNO, last_meta);
}

static bool invalid_data_extent(struct super_block *sb, u64 start, u64 len)
{
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	u64 last_data = (i_size_read(sb->s_bdev->bd_inode) >> SCOUTFS_BLOCK_SM_SHIFT) - 1;

	return invalid_extent(start, start + len - 1,
			      le64_to_cpu(super->first_data_blkno),
			      le64_to_cpu(super->last_data_blkno));
	return invalid_extent(start, start + len - 1, SCOUTFS_DATA_DEV_START_BLKNO, last_data);
}

void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
@@ -619,7 +648,7 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = &dalloc->root,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};
	int ret = 0;

@@ -645,6 +674,14 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
 *
 * Unlike meta allocations, the caller is expected to serialize
 * allocations from the root.
 *
 * ENOBUFS is returned if the data allocator ran out of space and we can
 * probably refill it from the server. The caller is expected to back
 * out, commit the transaction, and try again.
 *
 * ENOSPC is returned if the data allocator ran out of space but we have
 * a flag from the server telling us that there's no more space
 * available. This is a hard error and should be returned.
 */
int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
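
A hedged sketch of the retry contract described in the comment above; the caller, the helper name commit_and_retry_transaction(), and the trailing argument types are assumptions for illustration, not code from the scoutfs tree:

```c
/* hypothetical caller honoring the documented ENOBUFS/ENOSPC contract */
static int alloc_data_with_retry(struct super_block *sb, struct scoutfs_alloc *alloc,
				 struct scoutfs_block_writer *wri, void *dalloc,
				 u64 count, u64 *blkno_ret, u64 *count_ret)
{
	int ret;

	for (;;) {
		ret = scoutfs_alloc_data(sb, alloc, wri, dalloc, count,
					 blkno_ret, count_ret);
		if (ret != -ENOBUFS)
			return ret; /* 0 on success, or a hard error like -ENOSPC */

		/* this txn's allocator ran dry; commit so the server can refill */
		ret = commit_and_retry_transaction(sb); /* invented placeholder */
		if (ret < 0)
			return ret;
	}
}
```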
@@ -655,7 +692,7 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		.alloc = alloc,
		.wri = wri,
		.root = &dalloc->root,
		.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
	};
	struct scoutfs_extent ext;
	u64 len;
@@ -693,13 +730,13 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
	ret = 0;
out:
	if (ret < 0) {
		/*
		 * Special retval meaning there wasn't space to alloc from
		 * this txn. Doesn't mean filesystem is completely full.
		 * Maybe upper layers want to try again.
		 */
		if (ret == -ENOENT)
			ret = -ENOBUFS;
		if (ret == -ENOENT) {
			if (le32_to_cpu(dalloc->root.flags) & SCOUTFS_ALLOC_FLAG_LOW)
				ret = -ENOSPC;
			else
				ret = -ENOBUFS;
		}

		*blkno_ret = 0;
		*count_ret = 0;
	} else {
@@ -728,7 +765,7 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};
	int ret;

@@ -741,6 +778,95 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
	return ret;
}

/*
 * Return the first zone bit that the extent intersects with.
 */
static int first_extent_zone(struct scoutfs_extent *ext, __le64 *zones, u64 zone_blocks)
{
	int first;
	int last;
	int nr;

	first = div64_u64(ext->start, zone_blocks);
	last = div64_u64(ext->start + ext->len - 1, zone_blocks);

	nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, first);
	if (nr <= last)
		return nr;

	return SCOUTFS_DATA_ALLOC_MAX_ZONES;
}

/*
 * Find an extent in specific zones to satisfy an allocation. We use
 * the order items to search for the largest extent that intersects with
 * the zones whose bits are set in the caller's bitmap.
 */
static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *root,
			    __le64 *zones, u64 zone_blocks,
			    struct scoutfs_extent *found_ret, u64 count,
			    struct scoutfs_extent *ext_ret)
{
	struct alloc_ext_args args = {
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext;
	u64 start;
	u64 len;
	int nr;
	int ret;

	/* don't bother when there are no bits set */
	if (find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0) ==
	    SCOUTFS_DATA_ALLOC_MAX_ZONES)
		return -ENOENT;

	/* start searching for largest extent from the first zone */
	len = smallest_order_length(SCOUTFS_BLOCK_SM_MAX);
	nr = 0;

	for (;;) {
		/* search for extents in the next zone at our order */
		nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, nr);
		if (nr >= SCOUTFS_DATA_ALLOC_MAX_ZONES) {
			/* wrap down to next smaller order if we run out of bits */
			len >>= 3;
			if (len == 0) {
				ret = -ENOENT;
				break;
			}
			nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0);
		}

		start = (u64)nr * zone_blocks;

		ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, start, len, &found);
		if (ret < 0)
			break;

		/* see if the next extent intersects any zones */
		nr = first_extent_zone(&found, zones, zone_blocks);
		if (nr < SCOUTFS_DATA_ALLOC_MAX_ZONES) {
			start = (u64)nr * zone_blocks;

			ext.start = max(start, found.start);
			ext.len = min(count, found.start + found.len - ext.start);

			*found_ret = found;
			*ext_ret = ext;
			ret = 0;
			break;
		}

		/* continue searching past extent */
		nr = div64_u64(found.start + found.len - 1, zone_blocks) + 1;
		len = smallest_order_length(found.len);
	}

	return ret;
}
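
To illustrate the descent (with assumed numbers, since SCOUTFS_BLOCK_SM_MAX isn't shown in this diff): if the search starts at an order length of 2^30 blocks and no set zone bit yields an intersecting extent at that order, len >>= 3 drops the target to roughly 2^27, then 2^24, and so on, until len reaches 0 and the search gives up with -ENOENT.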

/*
 * Move extent items adding up to the requested total length from the
@@ -751,6 +877,11 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
 * -ENOENT is returned if we run out of extents in the source tree
 * before moving the total.
 *
 * The caller can specify that extents in the source tree should first
 * be found based on their zone bitmaps. We'll first try to find
 * extents in the exclusive zones, then vacant zones, and then we'll
 * fall back to normal allocation that ignores zones.
 *
 * This first pass is not optimal because it performs full btree walks
 * per extent. We could optimize this with more clever btree item
 * manipulation functions which can iterate through src and dst blocks
@@ -759,32 +890,77 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total)
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext;
	u64 moved = 0;
	u64 count;
	int ret = 0;
	int err;

	if (zone_blocks == 0) {
		exclusive = NULL;
		vacant = NULL;
	}

	while (moved < total) {
		args.root = src;
		args.type = SCOUTFS_FREE_EXTENT_LEN_TYPE;
		ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args,
					0, 0, total - moved, &ext);
		count = total - moved;

		if (exclusive) {
			/* first try to find extents in our exclusive zones */
			ret = find_zone_extent(sb, src, exclusive, zone_blocks,
					       &found, count, &ext);
			if (ret == -ENOENT) {
				exclusive = NULL;
				continue;
			}
		} else if (vacant) {
			/* then try to find extents in vacant zones */
			ret = find_zone_extent(sb, src, vacant, zone_blocks,
					       &found, count, &ext);
			if (ret == -ENOENT) {
				vacant = NULL;
				continue;
			}
		} else {
			/* otherwise fall back to finding extents anywhere */
			args.root = src;
			args.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE;
			ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, 0, 0, &found);
			if (ret == 0) {
				ext.start = found.start;
				ext.len = min(count, found.len);
			}
		}
		if (ret < 0)
			break;

		/* searching set start/len, finish initializing alloced extent */
		ext.map = found.map ? ext.start - found.start + found.map : 0;
		ext.flags = found.flags;

		/* remove the allocation from the found extent */
		args.root = src;
		args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
		ret = scoutfs_ext_remove(sb, &alloc_ext_ops, &args, ext.start, ext.len);
		if (ret < 0)
			break;

		/* insert the allocated extent into the dest */
		args.root = dst;
		args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
		args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
		ret = scoutfs_ext_insert(sb, &alloc_ext_ops, &args, ext.start,
					 ext.len, ext.map, ext.flags);
		if (ret < 0) {
			/* and put it back in src if insertion failed */
			args.root = src;
			args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
			args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
			err = scoutfs_ext_insert(sb, &alloc_ext_ops, &args,
						 ext.start, ext.len, ext.map,
						 ext.flags);
@@ -794,6 +970,8 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,

		moved += ext.len;
		scoutfs_inc_counter(sb, alloc_moved_extent);

		trace_scoutfs_alloc_move_extent(sb, &ext);
	}

	scoutfs_inc_counter(sb, alloc_move);
@@ -802,6 +980,39 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
	return ret;
}

/*
 * Add new free space to an allocator. _ext_insert will make sure that it doesn't
 * overlap with any existing extents. This is done by the server in a transaction that
 * also updates total_*_blocks in the super so we don't verify.
 */
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};

	return scoutfs_ext_insert(sb, &alloc_ext_ops, &args, start, len, 0, 0);
}

int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};

	return scoutfs_ext_remove(sb, &alloc_ext_ops, &args, start, len);
}

/*
 * We only trim one block, instead of looping trimming all, because the
 * caller is assuming that we do a fixed amount of work when they check
@@ -848,18 +1059,31 @@ out:
}

/*
 * True if the allocator has enough free blocks to cow (alloc and free)
 * a list block and all the btree blocks that store extent items.
 * True if the allocator has enough blocks in the avail list and space
 * in the freed list to be able to perform the caller's operations. If
 * false the caller should back off and return partial progress rather
 * than completely exhausting the avail list or overflowing the freed
 * list.
 *
 * At most, an extent operation can dirty down three paths of the tree
 * to modify a blkno item and two distant len items. We can grow and
 * split the root, and then those three paths could share blocks but each
 * modify two leaf blocks.
 * An extent modification dirties three distinct leaves of an allocator
 * btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent. Dirtying the paths to these
 * leaves can grow the tree and grow/shrink neighbours at each level.
 * We over-estimate the number of blocks allocated and freed (the paths
 * share a root, growth doesn't free) to err on the simpler and safer
 * side. The overhead is minimal given the relatively large list blocks
 * and relatively short allocator trees.
 *
 * The caller tells us how many extents they're about to modify and how
 * many other additional blocks they may cow manually. And finally, the
 * caller could be the first to dirty the avail and freed blocks in the
 * allocator.
 */
static bool list_can_cow(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_alloc_root *root)
static bool list_has_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
			    struct scoutfs_alloc_root *root, u32 extents, u32 addl_blocks)
{
	u32 most = 1 + (1 + 1 + (3 * (1 - root->root.height + 1)));
	u32 tree_blocks = (((1 + root->root.height) * 2) * 3) * extents;
	u32 most = 1 + tree_blocks + addl_blocks;

	if (le32_to_cpu(alloc->avail.first_nr) < most) {
		scoutfs_inc_counter(sb, alloc_list_avail_lo);
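
As a rough worked example of the new estimate (hypothetical numbers): with a btree of height 3, one extent, and no additional blocks, tree_blocks = ((1 + 3) * 2) * 3 * 1 = 24, so most = 1 + 24 + 0 = 25 avail-list blocks must be on hand before the operation is allowed to proceed.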
@@ -901,7 +1125,7 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
	};
	struct scoutfs_alloc_list_block *lblk;
	struct scoutfs_block *bl = NULL;
@@ -923,8 +1147,7 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
		goto out;
	lblk = bl->data;

	while (le32_to_cpu(lblk->nr) < target &&
	       list_can_cow(sb, alloc, root)) {
	while (le32_to_cpu(lblk->nr) < target && list_has_blocks(sb, alloc, root, 1, 0)) {

		ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args, 0, 0,
					target - le32_to_cpu(lblk->nr), &ext);
@@ -936,6 +1159,8 @@ int scoutfs_alloc_fill_list(struct super_block *sb,

		for (i = 0; i < ext.len; i++)
			list_block_add(lhead, lblk, ext.start + i);

		trace_scoutfs_alloc_fill_extent(sb, &ext);
	}

out:
@@ -958,7 +1183,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};
	struct scoutfs_alloc_list_block *lblk = NULL;
	struct scoutfs_block *bl = NULL;
@@ -968,7 +1193,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
	if (WARN_ON_ONCE(lhead_in_alloc(alloc, lhead)))
		return -EINVAL;

	while (lhead->ref.blkno && list_can_cow(sb, alloc, args.root)) {
	while (lhead->ref.blkno && list_has_blocks(sb, alloc, args.root, 1, 1)) {

		if (lhead->first_nr == 0) {
			ret = trim_empty_first_block(sb, alloc, wri, lhead);
@@ -1004,6 +1229,8 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
			break;

		list_block_remove(lhead, lblk, ext.len);

		trace_scoutfs_alloc_empty_extent(sb, &ext);
	}

	scoutfs_block_put(sb, bl);
@@ -1091,37 +1318,50 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
	return lo;
}

/*
 * Call the caller's callback for every persistent allocator structure
 * we can find.
 */
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg)
bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag)
{
	unsigned int seq;
	bool set;

	do {
		seq = read_seqbegin(&alloc->seqlock);
		set = !!(le32_to_cpu(alloc->avail.flags) & flag);
	} while (read_seqretry(&alloc->seqlock, seq));

	return set;
}

/*
 * Iterate over the allocator structures referenced by the caller's
 * super and call the caller's callback with summaries of the blocks
 * found in each structure.
 *
 * The caller's responsible for the stability of the referenced blocks.
 * If the blocks could be stale the caller must deal with retrying when
 * it sees ESTALE.
 */
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
				scoutfs_alloc_foreach_cb_t cb, void *arg)
{
	struct scoutfs_block_ref stale_refs[2] = {{0,}};
	struct scoutfs_block_ref refs[2] = {{0,}};
	struct scoutfs_super_block *super = NULL;
	struct scoutfs_srch_compact *sc;
	struct scoutfs_log_merge_request *lmreq;
	struct scoutfs_log_merge_complete *lmcomp;
	struct scoutfs_log_trees lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key key;
	int expected;
	u64 avail_tot;
	u64 freed_tot;
	u64 id;
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
	if (!super || !sc) {
	if (!sc) {
		ret = -ENOMEM;
		goto out;
	}

retry:
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
		goto out;

	refs[0] = super->logs_root.ref;
	refs[1] = super->srch_root.ref;

	/* all the server allocators */
	ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_SERVER, 0, true, true,
		 le64_to_cpu(super->meta_alloc[0].total_len)) ?:
@@ -1211,8 +1451,93 @@ retry:
		scoutfs_key_inc(&key);
	}

	/* log merge allocators */
	memset(&key, 0, sizeof(key));
	key.sk_zone = SCOUTFS_LOG_MERGE_REQUEST_ZONE;
	expected = sizeof(*lmreq);
	id = 0;
	avail_tot = 0;
	freed_tot = 0;

	for (;;) {
		ret = scoutfs_btree_next(sb, &super->log_merge, &key, &iref);
		if (ret == 0) {
			if (iref.key->sk_zone != key.sk_zone) {
				ret = -ENOENT;
			} else if (iref.val_len == expected) {
				key = *iref.key;
				if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
					lmreq = iref.val;
					id = le64_to_cpu(lmreq->rid);
					avail_tot = le64_to_cpu(lmreq->meta_avail.total_nr);
					freed_tot = le64_to_cpu(lmreq->meta_freed.total_nr);
				} else {
					lmcomp = iref.val;
					id = le64_to_cpu(lmcomp->rid);
					avail_tot = le64_to_cpu(lmcomp->meta_avail.total_nr);
					freed_tot = le64_to_cpu(lmcomp->meta_freed.total_nr);
				}
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret == -ENOENT) {
			if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
				memset(&key, 0, sizeof(key));
				key.sk_zone = SCOUTFS_LOG_MERGE_COMPLETE_ZONE;
				expected = sizeof(*lmcomp);
				continue;
			}
			break;
		}
		if (ret < 0)
			goto out;

		ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, true, avail_tot) ?:
		      cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, false, freed_tot);
		if (ret < 0)
			goto out;

		scoutfs_key_inc(&key);
	}

	ret = 0;
out:

	kfree(sc);
	return ret;
}

/*
 * Read the current on-disk super and use it to walk the allocators and
 * call the caller's callback. This assumes that the super it's reading
 * could be stale and will retry if it encounters stale blocks.
 */
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg)
{
	struct scoutfs_super_block *super = NULL;
	struct scoutfs_block_ref stale_refs[2] = {{0,}};
	struct scoutfs_block_ref refs[2] = {{0,}};
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	if (!super) {
		ret = -ENOMEM;
		goto out;
	}

retry:
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
		goto out;

	refs[0] = super->logs_root.ref;
	refs[1] = super->srch_root.ref;

	ret = scoutfs_alloc_foreach_super(sb, super, cb, arg);
out:
	if (ret == -ESTALE) {
		if (memcmp(&stale_refs, &refs, sizeof(refs)) == 0) {
			ret = -EIO;
@@ -1224,6 +1549,64 @@ out:
	}

	kfree(super);
	kfree(sc);
	return ret;
}

struct foreach_cb_args {
	scoutfs_alloc_extent_cb_t cb;
	void *cb_arg;
};

static int alloc_btree_extent_item_cb(struct super_block *sb, struct scoutfs_key *key, u64 seq,
				      u8 flags, void *val, int val_len, void *arg)
{
	struct foreach_cb_args *cba = arg;
	struct scoutfs_extent ext;

	if (key->sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
		return -ENOENT;

	ext_from_key(&ext, key);
	cba->cb(sb, cba->cb_arg, &ext);

	return 0;
}

/*
 * Call the caller's callback on each extent stored in the allocator's
 * btree. The callback sees extents called in order by starting blkno.
 */
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
			     scoutfs_alloc_extent_cb_t cb, void *cb_arg)
{
	struct foreach_cb_args cba = {
		.cb = cb,
		.cb_arg = cb_arg,
	};
	struct scoutfs_key start;
	struct scoutfs_key end;
	struct scoutfs_key key;
	int ret;

	init_ext_key(&key, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);

	for (;;) {
		/* will stop at order items before getting stuck in final block */
		BUILD_BUG_ON(SCOUTFS_FREE_EXTENT_BLKNO_ZONE > SCOUTFS_FREE_EXTENT_ORDER_ZONE);
		init_ext_key(&start, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);
		init_ext_key(&end, SCOUTFS_FREE_EXTENT_ORDER_ZONE, 0, 1);

		ret = scoutfs_btree_read_items(sb, &root->root, &key, &start, &end,
					       alloc_btree_extent_item_cb, &cba);
		if (ret < 0 || end.sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
			if (ret == -ENOENT)
				ret = 0;
			break;
		}

		key = end;
		scoutfs_key_inc(&key);
	}

	return ret;
}
@@ -38,6 +38,10 @@
 #define SCOUTFS_ALLOC_DATA_LG_THRESH \
	(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/* the client will force commits if data allocators get too low */
#define SCOUTFS_ALLOC_DATA_REFILL_THRESH \
	((256ULL * 1024 * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
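
For scale: assuming 4 KiB small blocks (a SCOUTFS_BLOCK_SM_SHIFT of 12, which isn't shown in this diff), the refill threshold works out to (256 * 1024 * 1024) >> 12 = 65536 blocks.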

/*
 * Fill client alloc roots to the target when they fall below the lo
 * threshold.
@@ -55,15 +59,16 @@
 #define SCOUTFS_SERVER_DATA_FILL_LO \
	(1ULL * 1024 * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)


/*
 * Each of the server meta_alloc roots will try to keep a minimum amount
 * of free blocks. The server will swap roots when its current avail
 * falls below the threshold while the freed root is still above it. It
 * must have room for all the largest allocation attempted in a
 * transaction on the server.
 * Log merge meta allocations are only used for one request and will
 * never use more than the dirty limit.
 */
#define SCOUTFS_SERVER_META_ALLOC_MIN \
	(SCOUTFS_SERVER_META_FILL_TARGET * 2)
#define SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT (64ULL * 1024 * 1024)
/* a few extra blocks for alloc blocks */
#define SCOUTFS_SERVER_MERGE_FILL_TARGET \
	((SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT >> SCOUTFS_BLOCK_LG_SHIFT) + 4)
#define SCOUTFS_SERVER_MERGE_FILL_LO SCOUTFS_SERVER_MERGE_FILL_TARGET

/*
 * A run-time use of a pair of persistent avail/freed roots as a
@@ -125,7 +130,14 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total);
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks);
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len);
int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len);

int scoutfs_alloc_fill_list(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
@@ -146,11 +158,20 @@ int scoutfs_alloc_splice_list(struct super_block *sb,

bool scoutfs_alloc_meta_low(struct super_block *sb,
			    struct scoutfs_alloc *alloc, u32 nr);
bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag);

typedef int (*scoutfs_alloc_foreach_cb_t)(struct super_block *sb, void *arg,
					  int owner, u64 id,
					  bool meta, bool avail, u64 blocks);
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg);
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
				scoutfs_alloc_foreach_cb_t cb, void *arg);

typedef void (*scoutfs_alloc_extent_cb_t)(struct super_block *sb, void *cb_arg,
					  struct scoutfs_extent *ext);
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
			     scoutfs_alloc_extent_cb_t cb, void *cb_arg);

#endif

@@ -200,7 +200,9 @@ static void block_free(struct super_block *sb, struct block_private *bp)
	else
		BUG();

	WARN_ON_ONCE(!list_empty(&bp->dirty_entry));
	/* ok to tear down dirty blocks when forcing unmount */
	WARN_ON_ONCE(!scoutfs_forcing_unmount(sb) && !list_empty(&bp->dirty_entry));

	WARN_ON_ONCE(atomic_read(&bp->refcount));
	WARN_ON_ONCE(atomic_read(&bp->io_count));
	kfree(bp);
@@ -485,6 +487,9 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
	sector_t sector;
	int ret = 0;

	if (scoutfs_forcing_unmount(sb))
		return -EIO;

	sector = bp->bl.blkno << (SCOUTFS_BLOCK_LG_SHIFT - 9);

	WARN_ON_ONCE(bp->bl.blkno == U64_MAX);
@@ -640,9 +645,11 @@ static struct block_private *block_read(struct super_block *sb, u64 blkno)
		goto out;
	}

	ret = wait_event_interruptible(binf->waitq, uptodate_or_error(bp));
	if (ret == 0 && test_bit(BLOCK_BIT_ERROR, &bp->bits))
	wait_event(binf->waitq, uptodate_or_error(bp));
	if (test_bit(BLOCK_BIT_ERROR, &bp->bits))
		ret = -EIO;
	else
		ret = 0;

out:
	if (ret < 0) {
@@ -1148,7 +1155,7 @@ static void sm_block_bio_end_io(struct bio *bio, int err)
 * only layer that sees the full block buffer so we pass the calculated
 * crc to the caller for them to check in their context.
 */
static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw, u64 blkno,
		       struct scoutfs_block_header *hdr, size_t len,
		       __le32 *blk_crc)
{
@@ -1160,6 +1167,9 @@ static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,

	BUILD_BUG_ON(PAGE_SIZE < SCOUTFS_BLOCK_SM_SIZE);

	if (scoutfs_forcing_unmount(sb))
		return -EIO;

	if (WARN_ON_ONCE(len > SCOUTFS_BLOCK_SM_SIZE) ||
	    WARN_ON_ONCE(!(rw & WRITE) && !blk_crc))
		return -EINVAL;
@@ -1212,14 +1222,14 @@ int scoutfs_block_read_sm(struct super_block *sb,
			  struct scoutfs_block_header *hdr, size_t len,
			  __le32 *blk_crc)
{
	return sm_block_io(bdev, READ, blkno, hdr, len, blk_crc);
	return sm_block_io(sb, bdev, READ, blkno, hdr, len, blk_crc);
}

int scoutfs_block_write_sm(struct super_block *sb,
			   struct block_device *bdev, u64 blkno,
			   struct scoutfs_block_header *hdr, size_t len)
{
	return sm_block_io(bdev, WRITE, blkno, hdr, len, NULL);
	return sm_block_io(sb, bdev, WRITE, blkno, hdr, len, NULL);
}

int scoutfs_block_setup(struct super_block *sb)
kmod/src/btree.c | 946

File diff suppressed because it is too large
@@ -20,13 +20,15 @@ struct scoutfs_btree_item_ref {

/* caller gives an item to the callback */
typedef int (*scoutfs_btree_item_cb)(struct super_block *sb,
				     struct scoutfs_key *key,
				     struct scoutfs_key *key, u64 seq, u8 flags,
				     void *val, int val_len, void *arg);

/* simple singly-linked list of items */
struct scoutfs_btree_item_list {
	struct scoutfs_btree_item_list *next;
	struct scoutfs_key key;
	u64 seq;
	u8 flags;
	int val_len;
	u8 val[0];
};
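
A minimal sketch of a callback matching the updated typedef, which now also receives each item's seq and flags (a hypothetical example, not from the scoutfs tree):

```c
/* count every item visited, ignoring seq and flags; arg points at a u64 */
static int count_items_cb(struct super_block *sb, struct scoutfs_key *key,
			  u64 seq, u8 flags, void *val, int val_len, void *arg)
{
	u64 *count = arg;

	(*count)++;
	return 0;
}
```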

@@ -82,6 +84,49 @@ int scoutfs_btree_insert_list(struct super_block *sb,
			      struct scoutfs_btree_root *root,
			      struct scoutfs_btree_item_list *lst);

int scoutfs_btree_parent_range(struct super_block *sb,
			       struct scoutfs_btree_root *root,
			       struct scoutfs_key *key,
			       struct scoutfs_key *start,
			       struct scoutfs_key *end);
int scoutfs_btree_get_parent(struct super_block *sb,
			     struct scoutfs_btree_root *root,
			     struct scoutfs_key *key,
			     struct scoutfs_btree_root *par_root);
int scoutfs_btree_set_parent(struct super_block *sb,
			     struct scoutfs_alloc *alloc,
			     struct scoutfs_block_writer *wri,
			     struct scoutfs_btree_root *root,
			     struct scoutfs_key *key,
			     struct scoutfs_btree_root *par_root);
int scoutfs_btree_rebalance(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
			    struct scoutfs_block_writer *wri,
			    struct scoutfs_btree_root *root,
			    struct scoutfs_key *key);

/* merge input is a list of roots */
struct scoutfs_btree_root_head {
	struct list_head head;
	struct scoutfs_btree_root root;
};

int scoutfs_btree_merge(struct super_block *sb,
			struct scoutfs_alloc *alloc,
			struct scoutfs_block_writer *wri,
			struct scoutfs_key *start,
			struct scoutfs_key *end,
			struct scoutfs_key *next_ret,
			struct scoutfs_btree_root *root,
			struct list_head *input_list,
			bool subtree, int dirty_limit, int alloc_low);

int scoutfs_btree_free_blocks(struct super_block *sb,
			      struct scoutfs_alloc *alloc,
			      struct scoutfs_block_writer *wri,
			      struct scoutfs_key *key,
			      struct scoutfs_btree_root *root, int alloc_low);

void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);

#endif
@@ -32,6 +32,7 @@
|
||||
#include "endian_swap.h"
|
||||
#include "quorum.h"
|
||||
#include "omap.h"
|
||||
#include "trans.h"
|
||||
|
||||
/*
|
||||
* The client is responsible for maintaining a connection to the server.
|
||||
@@ -48,6 +49,7 @@ struct client_info {
|
||||
|
||||
struct workqueue_struct *workq;
|
||||
struct delayed_work connect_dwork;
|
||||
unsigned long connect_delay_jiffies;
|
||||
|
||||
u64 server_term;
|
||||
|
||||
@@ -115,21 +117,6 @@ int scoutfs_client_get_roots(struct super_block *sb,
|
||||
NULL, 0, roots, sizeof(*roots));
|
||||
}
|
||||
|
||||
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
__le64 leseq;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_ADVANCE_SEQ,
|
||||
NULL, 0, &leseq, sizeof(leseq));
|
||||
if (ret == 0)
|
||||
*seq = le64_to_cpu(leseq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
@@ -216,6 +203,26 @@ int scoutfs_client_srch_commit_compact(struct super_block *sb,
|
||||
res, sizeof(*res), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_get_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_request *req)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_GET_LOG_MERGE,
|
||||
NULL, 0, req, sizeof(*req));
|
||||
}
|
||||
|
||||
int scoutfs_client_commit_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_complete *comp)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
|
||||
comp, sizeof(*comp), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map *map)
|
||||
{
|
||||
@@ -249,6 +256,67 @@ int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
|
||||
&args, sizeof(args), map, sizeof(*map));
|
||||
}
|
||||
|
||||
/* The client is asking the server for the current volume options */
|
||||
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_GET_VOLOPT,
|
||||
NULL, 0, volopt, sizeof(*volopt));
|
||||
}
|
||||
|
||||
/* The client is asking the server to update volume options */
|
||||
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_SET_VOLOPT,
|
||||
volopt, sizeof(*volopt), NULL, 0);
|
||||
}
|
||||
|
||||
/* The client is asking the server to clear volume options */
|
||||
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_CLEAR_VOLOPT,
|
||||
volopt, sizeof(*volopt), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_RESIZE_DEVICES,
|
||||
nrd, sizeof(*nrd), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_STATFS,
|
||||
NULL, 0, nst, sizeof(*nst));
|
||||
}
|
||||
|
||||
/*
|
||||
* The server is asking that we trigger a commit of the current log
|
||||
* trees so that they can ensure an item seq discontinuity between
|
||||
* finalized log btrees and the next set of open log btrees. If we're
|
||||
* shutting down then we're already going to perform a final commit.
|
||||
*/
|
||||
static int sync_log_trees(struct super_block *sb, struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id, void *arg, u16 arg_len)
|
||||
{
|
||||
if (arg_len != 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (!scoutfs_unmounting(sb))
|
||||
scoutfs_trans_sync(sb, 0);
|
||||
|
||||
return scoutfs_net_response(sb, conn, cmd, id, 0, NULL, 0);
|
||||
}
|
||||
|
||||
/* The client is receiving a invalidation request from the server */
|
||||
static int client_lock(struct super_block *sb,
		       struct scoutfs_net_connection *conn, u8 cmd, u64 id,
@@ -286,7 +354,8 @@ static int client_greeting(struct super_block *sb,
			   void *resp, unsigned int resp_len, int error,
			   void *data)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct client_info *client = sbi->client_info;
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_net_greeting *gr = resp;
	bool new_server;
@@ -303,17 +372,15 @@ static int client_greeting(struct super_block *sb,
	}

	if (gr->fsid != super->hdr.fsid) {
		scoutfs_warn(sb, "server sent fsid 0x%llx, client has 0x%llx",
			     le64_to_cpu(gr->fsid),
			     le64_to_cpu(super->hdr.fsid));
		scoutfs_warn(sb, "server greeting response fsid 0x%llx did not match client fsid 0x%llx",
			     le64_to_cpu(gr->fsid), le64_to_cpu(super->hdr.fsid));
		ret = -EINVAL;
		goto out;
	}

	if (gr->version != super->version) {
		scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
			     le64_to_cpu(gr->version),
			     le64_to_cpu(super->version));
	if (le64_to_cpu(gr->fmt_vers) != sbi->fmt_vers) {
		scoutfs_warn(sb, "server greeting response format version %llu did not match client format version %llu",
			     le64_to_cpu(gr->fmt_vers), sbi->fmt_vers);
		ret = -EINVAL;
		goto out;
	}
@@ -322,6 +389,7 @@ static int client_greeting(struct super_block *sb,
	scoutfs_net_client_greeting(sb, conn, new_server);

	client->server_term = le64_to_cpu(gr->server_term);
	client->connect_delay_jiffies = 0;
	ret = 0;
out:
	return ret;
@@ -371,6 +439,20 @@ out:
	return ret;
}

/*
 * If we're not seeing successful connections we want to back off. Each
 * connection attempt starts by setting a long connection work delay.
 * We only set a shorter delay if we see a greeting response from the
 * server. At that point we'll try to immediately reconnect if the
 * connection is broken.
 */
static void queue_connect_dwork(struct super_block *sb, struct client_info *client)
{
	if (!atomic_read(&client->shutting_down) && !scoutfs_forcing_unmount(sb))
		queue_delayed_work(client->workq, &client->connect_dwork,
				   client->connect_delay_jiffies);
}

/*
 * This work is responsible for maintaining a connection from the client
 * to the server. It's queued on mount and disconnect and we requeue
@@ -410,6 +492,9 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
		goto out;
	}

	/* always wait a bit until a greeting response sets a lower delay */
	client->connect_delay_jiffies = msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS);

	ret = scoutfs_quorum_server_sin(sb, &sin);
	if (ret < 0)
		goto out;
@@ -421,7 +506,7 @@ static void scoutfs_client_connect_worker(struct work_struct *work)

	/* send a greeting to verify endpoints of each connection */
	greet.fsid = super->hdr.fsid;
	greet.version = super->version;
	greet.fmt_vers = cpu_to_le64(sbi->fmt_vers);
	greet.server_term = cpu_to_le64(client->server_term);
	greet.rid = cpu_to_le64(sbi->rid);
	greet.flags = 0;
@@ -437,14 +522,12 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
	if (ret)
		scoutfs_net_shutdown(sb, client->conn);
out:

	/* always have a small delay before retrying to avoid storms */
	if (ret && !atomic_read(&client->shutting_down))
		queue_delayed_work(client->workq, &client->connect_dwork,
				   msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS));
	if (ret)
		queue_connect_dwork(sb, client);
}

static scoutfs_net_request_t client_req_funcs[] = {
	[SCOUTFS_NET_CMD_SYNC_LOG_TREES] = sync_log_trees,
	[SCOUTFS_NET_CMD_LOCK] = client_lock,
	[SCOUTFS_NET_CMD_LOCK_RECOVER] = client_lock_recover,
	[SCOUTFS_NET_CMD_OPEN_INO_MAP] = client_open_ino_map,
@@ -460,8 +543,7 @@ static void client_notify_down(struct super_block *sb,
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	if (!atomic_read(&client->shutting_down))
		queue_delayed_work(client->workq, &client->connect_dwork, 0);
	queue_connect_dwork(sb, client);
}

int scoutfs_client_setup(struct super_block *sb)
@@ -496,7 +578,7 @@ int scoutfs_client_setup(struct super_block *sb)
		goto out;
	}

	queue_delayed_work(client->workq, &client->connect_dwork, 0);
	queue_connect_dwork(sb, client);
	ret = 0;

out:
@@ -553,7 +635,7 @@ void scoutfs_client_destroy(struct super_block *sb)
	if (client == NULL)
		return;

	if (client->server_term != 0) {
	if (client->server_term != 0 && !scoutfs_forcing_unmount(sb)) {
		client->sending_farewell = true;
		ret = scoutfs_net_submit_request(sb, client->conn,
						 SCOUTFS_NET_CMD_FAREWELL,
@@ -561,10 +643,8 @@ void scoutfs_client_destroy(struct super_block *sb)
						 client_farewell_response,
						 NULL, NULL);
		if (ret == 0) {
			ret = wait_for_completion_interruptible(
							&client->farewell_comp);
			if (ret == 0)
				ret = client->farewell_error;
			wait_for_completion(&client->farewell_comp);
			ret = client->farewell_error;
		}
		if (ret) {
			scoutfs_inc_counter(sb, client_farewell_error);
@@ -588,3 +668,11 @@ void scoutfs_client_destroy(struct super_block *sb)
	kfree(client);
	sbi->client_info = NULL;
}

void scoutfs_client_net_shutdown(struct super_block *sb)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	if (client && client->conn)
		scoutfs_net_shutdown(sb, client->conn);
}

@@ -10,7 +10,6 @@ int scoutfs_client_commit_log_trees(struct super_block *sb,
int scoutfs_client_get_roots(struct super_block *sb,
			     struct scoutfs_net_roots *roots);
u64 *scoutfs_client_bulk_alloc(struct super_block *sb);
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq);
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq);
int scoutfs_client_lock_request(struct super_block *sb,
				struct scoutfs_net_lock *nl);
@@ -22,11 +21,21 @@ int scoutfs_client_srch_get_compact(struct super_block *sb,
				    struct scoutfs_srch_compact *sc);
int scoutfs_client_srch_commit_compact(struct super_block *sb,
				       struct scoutfs_srch_compact *res);
int scoutfs_client_get_log_merge(struct super_block *sb,
				 struct scoutfs_log_merge_request *req);
int scoutfs_client_commit_log_merge(struct super_block *sb,
				    struct scoutfs_log_merge_complete *comp);
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
				      struct scoutfs_open_ino_map *map);
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
				struct scoutfs_open_ino_map *map);
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd);
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst);

void scoutfs_client_net_shutdown(struct super_block *sb);
int scoutfs_client_setup(struct super_block *sb);
void scoutfs_client_destroy(struct super_block *sb);

@@ -44,6 +44,16 @@
	EXPAND_COUNTER(btree_insert) \
	EXPAND_COUNTER(btree_leaf_item_hash_search) \
	EXPAND_COUNTER(btree_lookup) \
	EXPAND_COUNTER(btree_merge) \
	EXPAND_COUNTER(btree_merge_alloc_low) \
	EXPAND_COUNTER(btree_merge_delete) \
	EXPAND_COUNTER(btree_merge_delta_combined) \
	EXPAND_COUNTER(btree_merge_delta_null) \
	EXPAND_COUNTER(btree_merge_dirty_limit) \
	EXPAND_COUNTER(btree_merge_drop_old) \
	EXPAND_COUNTER(btree_merge_insert) \
	EXPAND_COUNTER(btree_merge_update) \
	EXPAND_COUNTER(btree_merge_walk) \
	EXPAND_COUNTER(btree_next) \
	EXPAND_COUNTER(btree_prev) \
	EXPAND_COUNTER(btree_split) \
@@ -83,6 +93,8 @@
	EXPAND_COUNTER(item_clear_dirty) \
	EXPAND_COUNTER(item_create) \
	EXPAND_COUNTER(item_delete) \
	EXPAND_COUNTER(item_delta) \
	EXPAND_COUNTER(item_delta_written) \
	EXPAND_COUNTER(item_dirty) \
	EXPAND_COUNTER(item_invalidate) \
	EXPAND_COUNTER(item_invalidate_page) \
@@ -114,7 +126,6 @@
	EXPAND_COUNTER(lock_free) \
	EXPAND_COUNTER(lock_grant_request) \
	EXPAND_COUNTER(lock_grant_response) \
	EXPAND_COUNTER(lock_grant_work) \
	EXPAND_COUNTER(lock_invalidate_coverage) \
	EXPAND_COUNTER(lock_invalidate_inode) \
	EXPAND_COUNTER(lock_invalidate_request) \
@@ -140,6 +151,12 @@
	EXPAND_COUNTER(net_recv_invalid_message) \
	EXPAND_COUNTER(net_recv_messages) \
	EXPAND_COUNTER(net_unknown_request) \
	EXPAND_COUNTER(orphan_scan) \
	EXPAND_COUNTER(orphan_scan_cached) \
	EXPAND_COUNTER(orphan_scan_error) \
	EXPAND_COUNTER(orphan_scan_item) \
	EXPAND_COUNTER(orphan_scan_omap_set) \
	EXPAND_COUNTER(orphan_scan_read) \
	EXPAND_COUNTER(quorum_elected) \
	EXPAND_COUNTER(quorum_fence_error) \
	EXPAND_COUNTER(quorum_fence_leader) \
@@ -161,6 +178,7 @@
	EXPAND_COUNTER(srch_add_entry) \
	EXPAND_COUNTER(srch_compact_dirty_block) \
	EXPAND_COUNTER(srch_compact_entry) \
	EXPAND_COUNTER(srch_compact_error) \
	EXPAND_COUNTER(srch_compact_flush) \
	EXPAND_COUNTER(srch_compact_log_page) \
	EXPAND_COUNTER(srch_compact_removed_entry) \
@@ -175,6 +193,11 @@
	EXPAND_COUNTER(srch_search_xattrs) \
	EXPAND_COUNTER(srch_read_stale) \
	EXPAND_COUNTER(statfs) \
	EXPAND_COUNTER(totl_read_copied) \
	EXPAND_COUNTER(totl_read_finalized) \
	EXPAND_COUNTER(totl_read_fs) \
	EXPAND_COUNTER(totl_read_item) \
	EXPAND_COUNTER(totl_read_logged) \
	EXPAND_COUNTER(trans_commit_data_alloc_low) \
	EXPAND_COUNTER(trans_commit_dirty_meta_full) \
	EXPAND_COUNTER(trans_commit_fsync) \

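/*
 * An illustrative sketch: the EXPAND_COUNTER entries above form an
 * X-macro list, where each counter is named once and the list is
 * re-expanded with different definitions to generate struct fields,
 * printable names, and so on.  The names EXAMPLE_COUNTERS, DEFINE_FIELD,
 * and DEFINE_NAME below are invented for this example; scoutfs's real
 * expansions live in its counters code and may differ.
 */
#define EXAMPLE_COUNTERS(EXPAND)	\
	EXPAND(btree_lookup)		\
	EXPAND(statfs)

/* one expansion emits a field per counter... */
#define DEFINE_FIELD(name) unsigned long name;
struct example_counters {
	EXAMPLE_COUNTERS(DEFINE_FIELD)
};

/* ...another emits the printable names in the same order */
#define DEFINE_NAME(name) #name,
static const char * const example_counter_names[] = {
	EXAMPLE_COUNTERS(DEFINE_NAME)
};
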
@@ -207,6 +207,7 @@ static s64 truncate_extents(struct super_block *sb, struct inode *inode,
	u64 offset;
	s64 ret;
	u8 flags;
	int err;
	int i;

	flags = offline ? SEF_OFFLINE : 0;
@@ -246,6 +247,18 @@ static s64 truncate_extents(struct super_block *sb, struct inode *inode,
		tr.len = min(ext.len - offset, last - iblock + 1);
		tr.flags = ext.flags;

		trace_scoutfs_data_extent_truncated(sb, ino, &tr);

		ret = scoutfs_ext_set(sb, &data_ext_ops, &args,
				      tr.start, tr.len, 0, flags);
		if (ret < 0) {
			if (WARN_ON_ONCE(ret == -EINVAL)) {
				scoutfs_err(sb, "unexpected truncate inconsistency: ino %llu iblock %llu last %llu, start %llu len %llu",
					    ino, iblock, last, tr.start, tr.len);
			}
			break;
		}

		if (tr.map) {
			mutex_lock(&datinf->mutex);
			ret = scoutfs_free_data(sb, datinf->alloc,
@@ -253,16 +266,16 @@ static s64 truncate_extents(struct super_block *sb, struct inode *inode,
						&datinf->data_freed,
						tr.map, tr.len);
			mutex_unlock(&datinf->mutex);
			if (ret < 0)
			if (ret < 0) {
				err = scoutfs_ext_set(sb, &data_ext_ops, &args,
						      tr.start, tr.len, tr.map, tr.flags);
				if (err < 0)
					scoutfs_err(sb, "truncate err %d restoring extent after error %lld: ino %llu start %llu len %llu",
						    err, ret, ino, tr.start, tr.len);
				break;
			}
		}

		trace_scoutfs_data_extent_truncated(sb, ino, &tr);

		ret = scoutfs_ext_set(sb, &data_ext_ops, &args,
				      tr.start, tr.len, 0, flags);
		BUG_ON(ret); /* inconsistent, could prealloc items */

		iblock += tr.len;
	}

@@ -312,10 +325,9 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,

	while (iblock <= last) {
		if (inode)
			ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
							    true);
			ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true, false);
		else
			ret = scoutfs_hold_trans(sb);
			ret = scoutfs_hold_trans(sb, false);
		if (ret)
			break;

@@ -756,8 +768,7 @@ retry:
		ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
		      scoutfs_inode_index_prepare(sb, &wbd->ind_locks, inode,
						  true) ?:
		      scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks,
							ind_seq);
		      scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks, ind_seq, true);
	} while (ret > 0);
	if (ret < 0)
		goto out;
@@ -819,6 +830,7 @@ static int scoutfs_write_end(struct file *file, struct address_space *mapping,
			scoutfs_inode_inc_data_version(inode);
		}

		inode_inc_iversion(inode);
		scoutfs_update_inode_item(inode, wbd->lock, &wbd->ind_locks);
		scoutfs_inode_queue_writeback(inode);
	}
@@ -1010,7 +1022,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)

	while(iblock <= last) {

		ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
		ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
		if (ret)
			goto out;

@@ -1020,8 +1032,11 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
			end = (iblock + ret) << SCOUTFS_BLOCK_SM_SHIFT;
			if (end > offset + len)
				end = offset + len;
			if (end > i_size_read(inode))
			if (end > i_size_read(inode)) {
				i_size_write(inode, end);
				inode_inc_iversion(inode);
				scoutfs_inode_inc_data_version(inode);
			}
		}
		if (ret >= 0)
			scoutfs_update_inode_item(inode, lock, &ind_locks);
@@ -1086,7 +1101,7 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
	}

	/* we're updating meta_seq with offline block count */
	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
	if (ret < 0)
		goto out;

@@ -1238,7 +1253,7 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
		ret = scoutfs_inode_index_start(sb, &seq) ?:
		      scoutfs_inode_index_prepare(sb, &locks, from, true) ?:
		      scoutfs_inode_index_prepare(sb, &locks, to, true) ?:
		      scoutfs_inode_index_try_lock_hold(sb, &locks, seq);
		      scoutfs_inode_index_try_lock_hold(sb, &locks, seq, false);
		if (ret > 0)
			continue;
		if (ret < 0)
@@ -1353,10 +1368,12 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
	cur_time = CURRENT_TIME;
	if (!is_stage) {
		to->i_ctime = to->i_mtime = cur_time;
		inode_inc_iversion(to);
		scoutfs_inode_inc_data_version(to);
		scoutfs_inode_set_data_seq(to);
	}
	from->i_ctime = from->i_mtime = cur_time;
	inode_inc_iversion(from);
	scoutfs_inode_inc_data_version(from);
	scoutfs_inode_set_data_seq(from);

@@ -1844,13 +1861,17 @@ int scoutfs_data_prepare_commit(struct super_block *sb)
	return ret;
}

u64 scoutfs_data_alloc_free_bytes(struct super_block *sb)
/*
 * Return true if the data allocator is lower than the caller's
 * requirement and we haven't been told by the server that we're out of
 * free extents.
 */
bool scoutfs_data_alloc_should_refill(struct super_block *sb, u64 blocks)
{
	DECLARE_DATA_INFO(sb, datinf);

	return scoutfs_dalloc_total_len(&datinf->dalloc) <<
	       SCOUTFS_BLOCK_SM_SHIFT;

	return (scoutfs_dalloc_total_len(&datinf->dalloc) < blocks) &&
	       !(le32_to_cpu(datinf->dalloc.root.flags) & SCOUTFS_ALLOC_FLAG_LOW);
}

int scoutfs_data_setup(struct super_block *sb)

@@ -38,13 +38,6 @@ struct scoutfs_data_wait {
		.err = 0, \
	}

struct scoutfs_traced_extent {
	u64 iblock;
	u64 count;
	u64 blkno;
	u8 flags;
};

extern const struct address_space_operations scoutfs_file_aops;
extern const struct file_operations scoutfs_file_fops;
struct scoutfs_alloc;
@@ -86,7 +79,7 @@ void scoutfs_data_init_btrees(struct super_block *sb,
void scoutfs_data_get_btrees(struct super_block *sb,
			     struct scoutfs_log_trees *lt);
int scoutfs_data_prepare_commit(struct super_block *sb);
u64 scoutfs_data_alloc_free_bytes(struct super_block *sb);
bool scoutfs_data_alloc_should_refill(struct super_block *sb, u64 blocks);

int scoutfs_data_setup(struct super_block *sb);
void scoutfs_data_destroy(struct super_block *sb);

303 kmod/src/dir.c
@@ -31,6 +31,7 @@
#include "lock.h"
#include "hash.h"
#include "omap.h"
#include "forest.h"
#include "counters.h"
#include "scoutfs_trace.h"

@@ -135,8 +136,8 @@ static int alloc_dentry_info(struct dentry *dentry)
{
	struct dentry_info *di;

	/* XXX read mb? */
	if (dentry->d_fsdata)
		smp_rmb();
	if (dentry->d_op == &scoutfs_dentry_ops)
		return 0;

	di = kmem_cache_zalloc(dentry_info_cache, GFP_NOFS);
@@ -148,6 +149,7 @@ static int alloc_dentry_info(struct dentry *dentry)
	spin_lock(&dentry->d_lock);
	if (!dentry->d_fsdata) {
		dentry->d_fsdata = di;
		smp_wmb();
		d_set_d_op(dentry, &scoutfs_dentry_ops);
	}
	spin_unlock(&dentry->d_lock);
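
/*
 * An illustrative sketch of the publish/subscribe barrier idiom used in
 * alloc_dentry_info() above: the writer initializes ->d_fsdata, issues
 * smp_wmb(), and only then publishes by setting ->d_op; a reader that
 * observes the published ->d_op is guaranteed to also observe the
 * initialized ->d_fsdata.  The struct and function names below are
 * invented for the example and are not scoutfs code; it assumes the
 * kernel's smp_wmb()/smp_rmb() primitives.
 */
struct some_ops;

struct published_thing {
	void *payload;			/* initialized before publish */
	const struct some_ops *ops;	/* NULL until published */
};

static void publish(struct published_thing *t, void *payload,
		    const struct some_ops *ops)
{
	t->payload = payload;	/* initialize the payload first */
	smp_wmb();		/* order payload store before the publish */
	t->ops = ops;		/* publish */
}

static void *subscribe(struct published_thing *t, const struct some_ops *ops)
{
	if (t->ops != ops)	/* not yet published */
		return NULL;
	smp_rmb();		/* pairs with smp_wmb() in publish() */
	return t->payload;	/* guaranteed to see the initialized value */
}
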
@@ -253,7 +255,7 @@ static u64 dirent_name_hash(const char *name, unsigned int name_len)
	       ((u64)dirent_name_fingerprint(name, name_len) << 32);
}

static u64 dirent_names_equal(const char *a_name, unsigned int a_len,
static bool dirent_names_equal(const char *a_name, unsigned int a_len,
			       const char *b_name, unsigned int b_len)
{
	return a_len == b_len && memcmp(a_name, b_name, a_len) == 0;
@@ -275,8 +277,7 @@ static int lookup_dirent(struct super_block *sb, u64 dir_ino, const char *name,

	dent = alloc_dirent(SCOUTFS_NAME_LEN);
	if (!dent) {
		ret = -ENOMEM;
		goto out;
		return -ENOMEM;
	}

	init_dirent_key(&key, SCOUTFS_DIRENT_TYPE, dir_ino, hash, 0);
@@ -316,6 +317,52 @@ out:
	return ret;
}

/*
 * Verify that the caller's dentry still precisely matches our dirent
 * items.
 *
 * The caller has a dentry that the vfs revalidated before they acquired
 * their locks.  If the dentry is still covered by a lock we immediately
 * return 0.  If not, we check items and return -ENOENT if a positive
 * dentry no longer matches the items or -EEXIST if a negative entry's
 * name now has an item.
 */
static int verify_entry(struct super_block *sb, u64 dir_ino, struct dentry *dentry,
			struct scoutfs_lock *lock)
{
	struct dentry_info *di = dentry->d_fsdata;
	struct scoutfs_dirent dent = {0,};
	const char *name;
	u64 dentry_ino;
	int name_len;
	u64 hash;
	int ret;

	if (scoutfs_lock_is_covered(sb, &di->lock_cov))
		return 0;

	dentry_ino = dentry->d_inode ? scoutfs_ino(dentry->d_inode) : 0;
	name = dentry->d_name.name;
	name_len = dentry->d_name.len;
	hash = dirent_name_hash(name, name_len);

	ret = lookup_dirent(sb, dir_ino, name, name_len, hash, &dent, lock);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	if (dentry_ino != le64_to_cpu(dent.ino) || di->hash != le64_to_cpu(dent.hash) ||
	    di->pos != le64_to_cpu(dent.pos)) {
		if (dentry_ino)
			ret = -ENOENT;
		else
			ret = -EEXIST;
	} else {
		ret = 0;
	}

	return ret;
}

static int scoutfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct super_block *sb = dentry->d_sb;
@@ -423,7 +470,7 @@ static struct dentry *scoutfs_lookup(struct inode *dir, struct dentry *dentry,
{
	struct super_block *sb = dir->i_sb;
	struct scoutfs_lock *dir_lock = NULL;
	struct scoutfs_dirent dent;
	struct scoutfs_dirent dent = {0,};
	struct inode *inode;
	u64 ino = 0;
	u64 hash;
@@ -451,9 +498,11 @@ static struct dentry *scoutfs_lookup(struct inode *dir, struct dentry *dentry,
			ret = 0;
		} else if (ret == 0) {
			ino = le64_to_cpu(dent.ino);
		}
		if (ret == 0)
			update_dentry_info(sb, dentry, le64_to_cpu(dent.hash),
					   le64_to_cpu(dent.pos), dir_lock);
	}

	scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_READ);

out:
@@ -462,7 +511,7 @@ out:
	else if (ino == 0)
		inode = NULL;
	else
		inode = scoutfs_iget(sb, ino);
		inode = scoutfs_iget(sb, ino, 0, 0);

	/*
	 * We can't splice dir aliases into the dcache.  dir entries
@@ -490,10 +539,10 @@ static int KC_DECLARE_READDIR(scoutfs_readdir, struct file *file,
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct scoutfs_dirent *dent;
	struct scoutfs_key key;
	struct scoutfs_lock *dir_lock = NULL;
	struct scoutfs_dirent *dent = NULL;
	struct scoutfs_key last_key;
	struct scoutfs_lock *dir_lock;
	struct scoutfs_key key;
	int name_len;
	u64 pos;
	int ret;
@@ -503,8 +552,7 @@ static int KC_DECLARE_READDIR(scoutfs_readdir, struct file *file,

	dent = alloc_dirent(SCOUTFS_NAME_LEN);
	if (!dent) {
		ret = -ENOMEM;
		goto out;
		return -ENOMEM;
	}

	init_dirent_key(&last_key, SCOUTFS_READDIR_TYPE, scoutfs_ino(inode),
@@ -571,18 +619,17 @@ static int add_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
			   u64 ino, umode_t mode, struct scoutfs_lock *dir_lock,
			   struct scoutfs_lock *inode_lock)
{
	struct scoutfs_dirent *dent = NULL;
	struct scoutfs_key rdir_key;
	struct scoutfs_key ent_key;
	struct scoutfs_key lb_key;
	struct scoutfs_dirent *dent;
	bool del_ent = false;
	bool del_rdir = false;
	bool del_ent = false;
	int ret;

	dent = alloc_dirent(name_len);
	if (!dent) {
		ret = -ENOMEM;
		goto out;
		return -ENOMEM;
	}

	/* initialize the dent */
@@ -669,6 +716,7 @@ static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
				      umode_t mode, dev_t rdev,
				      struct scoutfs_lock **dir_lock,
				      struct scoutfs_lock **inode_lock,
				      struct scoutfs_lock **orph_lock,
				      struct list_head *ind_locks)
{
	struct super_block *sb = dir->i_sb;
@@ -701,11 +749,17 @@ static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
	if (ret)
		goto out_unlock;

	if (orph_lock) {
		ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, orph_lock);
		if (ret < 0)
			goto out_unlock;
	}

retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, ind_locks, dir, true) ?:
	      scoutfs_inode_index_prepare_ino(sb, ind_locks, ino, mode) ?:
	      scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq);
	      scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq, true);
	if (ret > 0)
		goto retry;
	if (ret)
@@ -725,9 +779,13 @@ out_unlock:
	if (ret) {
		scoutfs_inode_index_unlock(sb, ind_locks);
		scoutfs_unlock(sb, *dir_lock, SCOUTFS_LOCK_WRITE);
		scoutfs_unlock(sb, *inode_lock, SCOUTFS_LOCK_WRITE);
		*dir_lock = NULL;
		scoutfs_unlock(sb, *inode_lock, SCOUTFS_LOCK_WRITE);
		*inode_lock = NULL;
		if (orph_lock) {
			scoutfs_unlock(sb, *orph_lock, SCOUTFS_LOCK_WRITE_ONLY);
			*orph_lock = NULL;
		}

		inode = ERR_PTR(ret);
	}
@@ -742,6 +800,7 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
	struct inode *inode = NULL;
	struct scoutfs_lock *dir_lock = NULL;
	struct scoutfs_lock *inode_lock = NULL;
	struct scoutfs_inode_info *si;
	LIST_HEAD(ind_locks);
	u64 hash;
	u64 pos;
@@ -752,9 +811,14 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,

	hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);
	inode = lock_hold_create(dir, dentry, mode, rdev,
				 &dir_lock, &inode_lock, &ind_locks);
				 &dir_lock, &inode_lock, NULL, &ind_locks);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	si = SCOUTFS_I(inode);

	ret = verify_entry(sb, scoutfs_ino(dir), dentry, dir_lock);
	if (ret < 0)
		goto out;

	pos = SCOUTFS_I(dir)->next_readdir_pos++;

@@ -770,6 +834,10 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
	i_size_write(dir, i_size_read(dir) + dentry->d_name.len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	inode->i_mtime = inode->i_atime = inode->i_ctime = dir->i_mtime;
	si->crtime = inode->i_mtime;
	inode_inc_iversion(dir);
	inode_inc_iversion(inode);
	scoutfs_forest_inc_inode_count(sb);

	if (S_ISDIR(mode)) {
		inc_nlink(inode);
@@ -813,13 +881,15 @@ static int scoutfs_link(struct dentry *old_dentry,
	struct super_block *sb = dir->i_sb;
	struct scoutfs_lock *dir_lock;
	struct scoutfs_lock *inode_lock = NULL;
	struct scoutfs_lock *orph_lock = NULL;
	LIST_HEAD(ind_locks);
	bool del_orphan;
	bool del_orphan = false;
	u64 dir_size;
	u64 ind_seq;
	u64 hash;
	u64 pos;
	int ret;
	int err;

	hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);

@@ -842,14 +912,25 @@ static int scoutfs_link(struct dentry *old_dentry,
	if (ret)
		goto out_unlock;

	ret = verify_entry(sb, scoutfs_ino(dir), dentry, dir_lock);
	if (ret < 0)
		goto out_unlock;

	dir_size = i_size_read(dir) + dentry->d_name.len;
	del_orphan = (inode->i_nlink == 0);

	if (inode->i_nlink == 0) {
		del_orphan = true;
		ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(inode),
					  &orph_lock);
		if (ret < 0)
			goto out_unlock;
	}

retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq, true);
	if (ret > 0)
		goto retry;
	if (ret)
@@ -860,7 +941,7 @@ retry:
		goto out;

	if (del_orphan) {
		ret = scoutfs_orphan_dirty(sb, scoutfs_ino(inode));
		ret = scoutfs_inode_orphan_delete(sb, scoutfs_ino(inode), orph_lock);
		if (ret)
			goto out;
	}
@@ -871,19 +952,19 @@ retry:
			      dentry->d_name.name, dentry->d_name.len,
			      scoutfs_ino(inode), inode->i_mode, dir_lock,
			      inode_lock);
	if (ret)
	if (ret) {
		err = scoutfs_inode_orphan_create(sb, scoutfs_ino(inode), orph_lock);
		WARN_ON_ONCE(err); /* no orphan, might not scan and delete after crash */
		goto out;
	}
	update_dentry_info(sb, dentry, hash, pos, dir_lock);

	i_size_write(dir, dir_size);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	inode->i_ctime = dir->i_mtime;
	inc_nlink(inode);

	if (del_orphan) {
		ret = scoutfs_orphan_delete(sb, scoutfs_ino(inode));
		WARN_ON_ONCE(ret);
	}
	inode_inc_iversion(dir);
	inode_inc_iversion(inode);

	scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
	scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
@@ -896,6 +977,8 @@ out_unlock:
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);

	return ret;
}

@@ -920,6 +1003,7 @@ static int scoutfs_unlink(struct inode *dir, struct dentry *dentry)
	struct inode *inode = dentry->d_inode;
	struct timespec ts = current_kernel_time();
	struct scoutfs_lock *inode_lock = NULL;
	struct scoutfs_lock *orph_lock = NULL;
	struct scoutfs_lock *dir_lock = NULL;
	LIST_HEAD(ind_locks);
	u64 ind_seq;
@@ -932,42 +1016,58 @@ static int scoutfs_unlink(struct inode *dir, struct dentry *dentry)
	if (ret)
		return ret;

	ret = alloc_dentry_info(dentry);
	if (ret)
		goto unlock;

	ret = verify_entry(sb, scoutfs_ino(dir), dentry, dir_lock);
	if (ret < 0)
		goto unlock;

	if (S_ISDIR(inode->i_mode) && i_size_read(inode)) {
		ret = -ENOTEMPTY;
		goto unlock;
	}

	if (should_orphan(inode)) {
		ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(inode),
					  &orph_lock);
		if (ret < 0)
			goto unlock;
	}

retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq, false);
	if (ret > 0)
		goto retry;
	if (ret)
		goto unlock;

	if (should_orphan(inode)) {
		ret = scoutfs_inode_orphan_create(sb, scoutfs_ino(inode), orph_lock);
		if (ret < 0)
			goto out;
	}

	ret = del_entry_items(sb, scoutfs_ino(dir), dentry_info_hash(dentry),
			      dentry_info_pos(dentry), scoutfs_ino(inode),
			      dir_lock, inode_lock);
	if (ret)
	if (ret) {
		ret = scoutfs_inode_orphan_delete(sb, scoutfs_ino(inode), orph_lock);
		WARN_ON_ONCE(ret); /* should have been dirty */
		goto out;

	if (should_orphan(inode)) {
		/*
		 * Insert the orphan item before we modify any inode
		 * metadata so we can gracefully exit should it
		 * fail.
		 */
		ret = scoutfs_orphan_inode(inode);
		WARN_ON_ONCE(ret); /* XXX returning error but items deleted */
		if (ret)
			goto out;
	}

	update_dentry_info(sb, dentry, 0, 0, dir_lock);

	dir->i_ctime = ts;
	dir->i_mtime = ts;
	i_size_write(dir, i_size_read(dir) - dentry->d_name.len);
	inode_inc_iversion(dir);
	inode_inc_iversion(inode);

	inode->i_ctime = ts;
	drop_nlink(inode);
@@ -984,6 +1084,7 @@ unlock:
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);

	return ret;
}
@@ -1159,6 +1260,7 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
	struct inode *inode = NULL;
	struct scoutfs_lock *dir_lock = NULL;
	struct scoutfs_lock *inode_lock = NULL;
	struct scoutfs_inode_info *si;
	LIST_HEAD(ind_locks);
	u64 hash;
	u64 pos;
@@ -1176,9 +1278,14 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
		return ret;

	inode = lock_hold_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0,
				 &dir_lock, &inode_lock, &ind_locks);
				 &dir_lock, &inode_lock, NULL, &ind_locks);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	si = SCOUTFS_I(inode);

	ret = verify_entry(sb, scoutfs_ino(dir), dentry, dir_lock);
	if (ret < 0)
		goto out;

	ret = symlink_item_ops(sb, SYM_CREATE, scoutfs_ino(inode), inode_lock,
			       symname, name_len);
@@ -1198,9 +1305,13 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,

	i_size_write(dir, i_size_read(dir) + dentry->d_name.len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	inode_inc_iversion(dir);

	inode->i_ctime = dir->i_mtime;
	si->crtime = inode->i_ctime;
	i_size_write(inode, name_len);
	inode_inc_iversion(inode);
	scoutfs_forest_inc_inode_count(sb);

	scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
	scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
@@ -1253,10 +1364,10 @@ int scoutfs_dir_add_next_linkref(struct super_block *sb, u64 ino,
				 u64 dir_ino, u64 dir_pos,
				 struct list_head *list)
{
	struct scoutfs_link_backref_entry *ent;
	struct scoutfs_link_backref_entry *ent = NULL;
	struct scoutfs_lock *lock = NULL;
	struct scoutfs_key last_key;
	struct scoutfs_key key;
	struct scoutfs_lock *lock = NULL;
	int len;
	int ret;

@@ -1476,26 +1587,6 @@ static int verify_ancestors(struct super_block *sb, u64 p1, u64 p2,
	return ret;
}

/*
 * Make sure that a dirent from the dir to the inode exists at the name.
 * The caller has the name locked in the dir.
 */
static int verify_entry(struct super_block *sb, u64 dir_ino, const char *name,
			unsigned name_len, u64 hash, u64 ino,
			struct scoutfs_lock *lock)
{
	struct scoutfs_dirent dent;
	int ret;

	ret = lookup_dirent(sb, dir_ino, name, name_len, hash, &dent, lock);
	if (ret == 0 && le64_to_cpu(dent.ino) != ino)
		ret = -ENOENT;
	else if (ret == -ENOENT && ino == 0)
		ret = 0;

	return ret;
}

/*
 * The vfs performs checks on cached inodes and dirents before calling
 * here.  It doesn't hold any locks so all of those checks can be based
@@ -1524,8 +1615,9 @@ static int verify_entry(struct super_block *sb, u64 dir_ino, const char *name,
 * from using parent/child locking orders as two groups can have both
 * parent and child relationships to each other.
 */
static int scoutfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
static int scoutfs_rename_common(struct inode *old_dir,
				 struct dentry *old_dentry, struct inode *new_dir,
				 struct dentry *new_dentry, unsigned int flags)
{
	struct super_block *sb = old_dir->i_sb;
	struct inode *old_inode = old_dentry->d_inode;
@@ -1535,6 +1627,7 @@ static int scoutfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	struct scoutfs_lock *new_dir_lock = NULL;
	struct scoutfs_lock *old_inode_lock = NULL;
	struct scoutfs_lock *new_inode_lock = NULL;
	struct scoutfs_lock *orph_lock = NULL;
	struct timespec now;
	bool ins_new = false;
	bool del_new = false;
@@ -1589,16 +1682,25 @@ static int scoutfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	}

	/* make sure that the entries assumed by the argument still exist */
	ret = verify_entry(sb, scoutfs_ino(old_dir), old_dentry->d_name.name,
			   old_dentry->d_name.len, old_hash,
			   scoutfs_ino(old_inode), old_dir_lock) ?:
	      verify_entry(sb, scoutfs_ino(new_dir), new_dentry->d_name.name,
			   new_dentry->d_name.len, new_hash,
			   new_inode ? scoutfs_ino(new_inode) : 0,
			   new_dir_lock);
	ret = alloc_dentry_info(old_dentry) ?:
	      alloc_dentry_info(new_dentry) ?:
	      verify_entry(sb, scoutfs_ino(old_dir), old_dentry, old_dir_lock) ?:
	      verify_entry(sb, scoutfs_ino(new_dir), new_dentry, new_dir_lock);
	if (ret)
		goto out_unlock;

	if ((flags & RENAME_NOREPLACE) && (new_inode != NULL)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	if (should_orphan(new_inode)) {
		ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(new_inode),
					  &orph_lock);
		if (ret < 0)
			goto out_unlock;
	}

retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, old_dir, false) ?:
@@ -1607,7 +1709,7 @@ retry:
	       scoutfs_inode_index_prepare(sb, &ind_locks, new_dir, false)) ?:
	      (new_inode == NULL ? 0 :
	       scoutfs_inode_index_prepare(sb, &ind_locks, new_inode, false)) ?:
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq, true);
	if (ret > 0)
		goto retry;
	if (ret)
@@ -1658,7 +1760,7 @@ retry:
	ins_old = true;

	if (should_orphan(new_inode)) {
		ret = scoutfs_orphan_inode(new_inode);
		ret = scoutfs_inode_orphan_create(sb, scoutfs_ino(new_inode), orph_lock);
		if (ret)
			goto out;
	}
@@ -1698,6 +1800,13 @@ retry:
	if (new_inode)
		old_inode->i_ctime = now;

	inode_inc_iversion(old_dir);
	inode_inc_iversion(old_inode);
	if (new_dir != old_dir)
		inode_inc_iversion(new_dir);
	if (new_inode)
		inode_inc_iversion(new_inode);

	scoutfs_update_inode_item(old_dir, old_dir_lock, &ind_locks);
	scoutfs_update_inode_item(old_inode, old_inode_lock, &ind_locks);
	if (new_dir != old_dir)
@@ -1762,10 +1871,28 @@ out_unlock:
	scoutfs_unlock(sb, old_dir_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, new_dir_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, rename_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);

	return ret;
}

static int scoutfs_rename(struct inode *old_dir,
			  struct dentry *old_dentry, struct inode *new_dir,
			  struct dentry *new_dentry)
{
	return scoutfs_rename_common(old_dir, old_dentry, new_dir, new_dentry, 0);
}

static int scoutfs_rename2(struct inode *old_dir,
			   struct dentry *old_dentry, struct inode *new_dir,
			   struct dentry *new_dentry, unsigned int flags)
{
	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	return scoutfs_rename_common(old_dir, old_dentry, new_dir, new_dentry, flags);
}
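
/*
 * An illustrative userspace sketch: with .rename2 wired up above,
 * RENAME_NOREPLACE becomes reachable through renameat2(2).  This
 * assumes glibc 2.28+, which exposes renameat2() and the RENAME_*
 * flags from <stdio.h>; older systems would use
 * syscall(SYS_renameat2, ...) instead.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s OLD NEW\n", argv[0]);
		return 1;
	}

	/* fails with EEXIST instead of silently replacing NEW */
	if (renameat2(AT_FDCWD, argv[1], AT_FDCWD, argv[2],
		      RENAME_NOREPLACE) != 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}
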

#ifdef KC_FMODE_KABI_ITERATE
/* we only need this to set the iterate flag for kabi :/ */
static int scoutfs_dir_open(struct inode *inode, struct file *file)
@@ -1781,6 +1908,8 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
	struct inode *inode = NULL;
	struct scoutfs_lock *dir_lock = NULL;
	struct scoutfs_lock *inode_lock = NULL;
	struct scoutfs_lock *orph_lock = NULL;
	struct scoutfs_inode_info *si;
	LIST_HEAD(ind_locks);
	int ret;

@@ -1788,25 +1917,36 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
		return -ENAMETOOLONG;

	inode = lock_hold_create(dir, dentry, mode, 0,
				 &dir_lock, &inode_lock, &ind_locks);
				 &dir_lock, &inode_lock, &orph_lock, &ind_locks);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	si = SCOUTFS_I(inode);

	ret = scoutfs_inode_orphan_create(sb, scoutfs_ino(inode), orph_lock);
	if (ret < 0) {
		iput(inode);
		goto out; /* XXX returning error but items created */
	}

	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	si->crtime = inode->i_mtime;
	insert_inode_hash(inode);
	ihold(inode); /* need to update inode modifications in d_tmpfile */
	d_tmpfile(dentry, inode);
	inode_inc_iversion(inode);
	scoutfs_forest_inc_inode_count(sb);

	scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
	scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
	scoutfs_inode_index_unlock(sb, &ind_locks);
	iput(inode);

	ret = scoutfs_orphan_inode(inode);
	WARN_ON_ONCE(ret); /* XXX returning error but items deleted */

out:
	scoutfs_release_trans(sb);
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);

	return ret;
}
@@ -1843,6 +1983,7 @@ const struct inode_operations_wrapper scoutfs_dir_iops = {
		.permission = scoutfs_permission,
	},
	.tmpfile = scoutfs_tmpfile,
	.rename2 = scoutfs_rename2,
};

void scoutfs_dir_exit(void)

@@ -81,7 +81,7 @@ static struct dentry *scoutfs_fh_to_dentry(struct super_block *sb,
	trace_scoutfs_fh_to_dentry(sb, fh_type, sfid);

	if (scoutfs_valid_fileid(fh_type))
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino));
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino), 0, SCOUTFS_IGF_LINKED);

	return d_obtain_alias(inode);
}
@@ -100,7 +100,7 @@ static struct dentry *scoutfs_fh_to_parent(struct super_block *sb,

	if (scoutfs_valid_fileid(fh_type) &&
	    fh_type == FILEID_SCOUTFS_WITH_PARENT)
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino));
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino), 0, SCOUTFS_IGF_LINKED);

	return d_obtain_alias(inode);
}
@@ -123,7 +123,7 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
	scoutfs_dir_free_backref_path(sb, &list);
	trace_scoutfs_get_parent(sb, inode, ino);

	inode = scoutfs_iget(sb, ino);
	inode = scoutfs_iget(sb, ino, 0, SCOUTFS_IGF_LINKED);

	return d_obtain_alias(inode);
}

@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>

#include "msg.h"
#include "ext.h"
#include "counters.h"
#include "scoutfs_trace.h"

@@ -191,6 +192,9 @@ int scoutfs_ext_insert(struct super_block *sb, struct scoutfs_ext_ops *ops,

	/* inserting extent must not overlap */
	if (found.len && ext_overlap(&ins, found.start, found.len)) {
		if (ops->insert_overlap_warn)
			scoutfs_err(sb, "inserting extent %llu.%llu overlaps existing %llu.%llu",
				    start, len, found.start, found.len);
		ret = -EINVAL;
		goto out;
	}
@@ -242,6 +246,8 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,

	/* removed extent must be entirely within found */
	if (!scoutfs_ext_inside(start, len, &found)) {
		scoutfs_err(sb, "error removing extent %llu.%llu, isn't inside existing %llu.%llu",
			    start, len, found.start, found.len);
		ret = -EINVAL;
		goto out;
	}

@@ -15,6 +15,8 @@ struct scoutfs_ext_ops {
		      u64 start, u64 len, u64 map, u8 flags);
	int (*remove)(struct super_block *sb, void *arg, u64 start, u64 len,
		      u64 map, u8 flags);

	bool insert_overlap_warn;
};

bool scoutfs_ext_can_merge(struct scoutfs_extent *left,

480 kmod/src/fence.c (new file)
@@ -0,0 +1,480 @@
/*
 * Copyright (C) 2019 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <asm/barrier.h>

#include "super.h"
#include "msg.h"
#include "sysfs.h"
#include "server.h"
#include "fence.h"

/*
 * Fencing ensures that a given mount can no longer write to the
 * metadata or data devices.  It's necessary to ensure that it's safe to
 * give another mount access to a resource that is currently owned by a
 * mount that has stopped responding.
 *
 * Fencing is performed in collaboration between the currently elected
 * quorum leader mount and userspace running on its host.  The kernel
 * creates fencing requests as it notices that mounts have stopped
 * participating.  The fence requests are published as directories in
 * sysfs.  Userspace agents watch for directories, take action, and
 * write to files in the directory to indicate that the mount has been
 * fenced.  Once the mount is fenced the server can reclaim the
 * resources previously held by the fenced mount.
 *
 * The fence requests contain metadata identifying the specific instance
 * of the mount that needs to be fenced.  This lets a fencing agent
 * ensure that a specific mount has been fenced without necessarily
 * destroying the node that was hosting it.  Maybe the node had rebooted
 * and the mount is no longer there, maybe the mount can be force
 * unmounted, maybe the node can be configured to isolate the mount from
 * the devices.
 *
 * The fencing mechanism is asynchronous and can fail but the server
 * cannot make progress until it completes.  If a fence request times
 * out the server shuts down in the hope that another instance of a
 * server might have more luck fencing a non-responsive mount.
 *
 * Sources of fencing are fundamentally anchored in shared persistent
 * state.  It is possible, though unlikely, that servers can fence a
 * node and then themselves fail, leaving the next server to try and
 * fence the mount again.
 */

struct fence_info {
	struct kset *kset;
	struct kobject fence_dir_kobj;
	struct workqueue_struct *wq;
	wait_queue_head_t waitq;
	spinlock_t lock;
	struct list_head list;
};

#define DECLARE_FENCE_INFO(sb, name) \
	struct fence_info *name = SCOUTFS_SB(sb)->fence_info

struct pending_fence {
	struct super_block *sb;
	struct scoutfs_sysfs_attrs ssa;
	struct list_head entry;
	struct timer_list timer;

	ktime_t start_kt;
	__be32 ipv4_addr;
	bool fenced;
	bool error;
	int reason;
	u64 rid;
};

#define FENCE_FROM_KOBJ(kobj) \
	container_of(SCOUTFS_SYSFS_ATTRS(kobj), struct pending_fence, ssa)
#define DECLARE_FENCE_FROM_KOBJ(name, kobj) \
	struct pending_fence *name = FENCE_FROM_KOBJ(kobj)

static void destroy_fence(struct pending_fence *fence)
{
	struct super_block *sb = fence->sb;

	scoutfs_sysfs_destroy_attrs(sb, &fence->ssa);
	del_timer_sync(&fence->timer);
	kfree(fence);
}

static ssize_t elapsed_secs_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	ktime_t now = ktime_get();
	struct timeval tv = { 0, };

	if (ktime_after(now, fence->start_kt))
		tv = ktime_to_timeval(ktime_sub(now, fence->start_kt));

	return snprintf(buf, PAGE_SIZE, "%llu", (long long)tv.tv_sec);
}
SCOUTFS_ATTR_RO(elapsed_secs);

static ssize_t fenced_show(struct kobject *kobj, struct kobj_attribute *attr,
			   char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%u", !!fence->fenced);
}

/*
 * any write to the fenced file from userspace indicates that the mount
 * has been safely fenced and can no longer write to the shared device.
 */
static ssize_t fenced_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	DECLARE_FENCE_INFO(fence->sb, fi);

	if (!fence->fenced) {
		del_timer_sync(&fence->timer);
		fence->fenced = true;
		wake_up(&fi->waitq);
	}

	return count;
}
SCOUTFS_ATTR_RW(fenced);

static ssize_t error_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%u", !!fence->error);
}

/*
 * The fencing agent can tell us that it was unable to fence the given
 * mount.  We can't continue if the mount can't be isolated, so we shut
 * down the server.
 */
static ssize_t error_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
struct super_block *sb = fence->sb;
|
||||
DECLARE_FENCE_INFO(fence->sb, fi);
|
||||
|
||||
if (!fence->error) {
|
||||
fence->error = true;
|
||||
scoutfs_err(sb, "error indicated by fence action for rid %016llx", fence->rid);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(error);
|
||||
|
||||
static ssize_t ipv4_addr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%pI4", &fence->ipv4_addr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(ipv4_addr);
|
||||
|
||||
static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
unsigned r = fence->reason;
|
||||
char *str = "unknown";
|
||||
static char *reasons[] = {
|
||||
[SCOUTFS_FENCE_CLIENT_RECOVERY] = "client_recovery",
|
||||
[SCOUTFS_FENCE_CLIENT_RECONNECT] = "client_reconnect",
|
||||
[SCOUTFS_FENCE_QUORUM_BLOCK_LEADER] = "quorum_block_leader",
|
||||
};
|
||||
|
||||
if (r < ARRAY_SIZE(reasons) && reasons[r])
|
||||
str = reasons[r];
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", str);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(reason);
|
||||
|
||||
static ssize_t rid_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx", fence->rid);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(rid);
|
||||
|
||||
static struct attribute *fence_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(elapsed_secs),
|
||||
SCOUTFS_ATTR_PTR(fenced),
|
||||
SCOUTFS_ATTR_PTR(error),
|
||||
SCOUTFS_ATTR_PTR(ipv4_addr),
|
||||
SCOUTFS_ATTR_PTR(reason),
|
||||
SCOUTFS_ATTR_PTR(rid),
|
||||
NULL,
|
||||
};
|
||||
|
||||
#define FENCE_TIMEOUT_MS (MSEC_PER_SEC * 30)
|
||||
|
||||
static void fence_timeout(struct timer_list *timer)
|
||||
{
|
||||
struct pending_fence *fence = from_timer(fence, timer, timer);
|
||||
struct super_block *sb = fence->sb;
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
|
||||
fence->error = true;
|
||||
scoutfs_err(sb, "fence request for rid %016llx was not serviced in %lums, raising error",
|
||||
fence->rid, FENCE_TIMEOUT_MS);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret;
|
||||
|
||||
fence = kzalloc(sizeof(struct pending_fence), GFP_NOFS);
|
||||
if (!fence) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fence->sb = sb;
|
||||
scoutfs_sysfs_init_attrs(sb, &fence->ssa);
|
||||
|
||||
fence->start_kt = ktime_get();
|
||||
fence->ipv4_addr = ipv4_addr;
|
||||
fence->fenced = false;
|
||||
fence->error = false;
|
||||
fence->reason = reason;
|
||||
fence->rid = rid;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs_parent(sb, &fi->kset->kobj,
|
||||
&fence->ssa, fence_attrs,
|
||||
"%016llx", rid);
|
||||
if (ret < 0) {
|
||||
kfree(fence);
|
||||
goto out;
|
||||
}
|
||||
|
||||
timer_setup(&fence->timer, fence_timeout, 0);
|
||||
fence->timer.expires = jiffies + msecs_to_jiffies(FENCE_TIMEOUT_MS);
|
||||
add_timer(&fence->timer);
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_add_tail(&fence->entry, &fi->list);
|
||||
spin_unlock(&fi->lock);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Give the caller the rid of the next fence request which has been
|
||||
* fenced. This doesn't have a position from which to return the next
|
||||
* because the caller either frees the fence request it's given or shuts
|
||||
* down.
|
||||
*/
|
||||
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret = -ENOENT;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->fenced || fence->error) {
|
||||
*rid = fence->rid;
|
||||
*reason = fence->reason;
|
||||
*error = fence->error;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_fence_reason_pending(struct super_block *sb, int reason)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
bool pending = false;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->reason == reason) {
|
||||
pending = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return pending;
|
||||
}
|
||||
|
||||
int scoutfs_fence_free(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret = -ENOENT;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->rid == rid) {
|
||||
list_del_init(&fence->entry);
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
if (ret == 0) {
|
||||
destroy_fence(fence);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool all_fenced(struct fence_info *fi, bool *error)
|
||||
{
|
||||
struct pending_fence *fence;
|
||||
bool all = true;
|
||||
|
||||
*error = false;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->error) {
|
||||
*error = true;
|
||||
all = true;
|
||||
break;
|
||||
}
|
||||
if (!fence->fenced) {
|
||||
all = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return all;
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller waits for all the current requests to be fenced, but not
|
||||
* necessarily reclaimed.
|
||||
*/
|
||||
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
bool error;
|
||||
long ret;
|
||||
|
||||
	ret = wait_event_timeout(fi->waitq, all_fenced(fi, &error), timeout_jiffies);
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (error)
		ret = -EIO;
	else
		ret = 0;

	return ret;
}
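A plausible call site for the wait, shown only as a hedged sketch: a newly elected leader could bound its fencing wait with the SCOUTFS_QUORUM_FENCE_TO_MS timeout that this series adds to format.h, shutting back down on timeout or error; shutdown_server() is hypothetical.

	/* hedged sketch: bound fencing during server startup */
	ret = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
	if (ret < 0)
		shutdown_server(sb);	/* hypothetical caller response */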

/*
 * This must be called early during startup so that it is guaranteed that
 * no other subsystems will try and call fence_start while we're waiting
 * for testing fence requests to complete.
 */
int scoutfs_fence_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct mount_options *opts = &sbi->opts;
	struct fence_info *fi;
	int ret;

	/* can only fence if we can be elected by quorum */
	if (opts->quorum_slot_nr == -1) {
		ret = 0;
		goto out;
	}

	fi = kzalloc(sizeof(struct fence_info), GFP_KERNEL);
	if (!fi) {
		ret = -ENOMEM;
		goto out;
	}

	init_waitqueue_head(&fi->waitq);
	spin_lock_init(&fi->lock);
	INIT_LIST_HEAD(&fi->list);

	sbi->fence_info = fi;

	fi->kset = kset_create_and_add("fence", NULL, scoutfs_sysfs_sb_dir(sb));
	if (!fi->kset) {
		ret = -ENOMEM;
		goto out;
	}

	fi->wq = alloc_workqueue("scoutfs_fence",
				 WQ_UNBOUND | WQ_NON_REENTRANT, 0);
	if (!fi->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;
out:
	if (ret)
		scoutfs_fence_destroy(sb);

	return ret;
}

/*
 * Tear down all pending fence requests because the server is shutting down.
 */
void scoutfs_fence_stop(struct super_block *sb)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;

	do {
		spin_lock(&fi->lock);
		fence = list_first_entry_or_null(&fi->list, struct pending_fence, entry);
		if (fence)
			list_del_init(&fence->entry);
		spin_unlock(&fi->lock);

		if (fence) {
			destroy_fence(fence);
			wake_up(&fi->waitq);
		}
	} while (fence);
}

void scoutfs_fence_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct fence_info *fi = SCOUTFS_SB(sb)->fence_info;
	struct pending_fence *fence;
	struct pending_fence *tmp;

	if (fi) {
		if (fi->wq)
			destroy_workqueue(fi->wq);
		list_for_each_entry_safe(fence, tmp, &fi->list, entry)
			destroy_fence(fence);
		if (fi->kset)
			kset_unregister(fi->kset);
		kfree(fi);
		sbi->fence_info = NULL;
	}
}

20 kmod/src/fence.h Normal file
@@ -0,0 +1,20 @@
#ifndef _SCOUTFS_FENCE_H_
#define _SCOUTFS_FENCE_H_

enum {
	SCOUTFS_FENCE_CLIENT_RECOVERY,
	SCOUTFS_FENCE_CLIENT_RECONNECT,
	SCOUTFS_FENCE_QUORUM_BLOCK_LEADER,
};

int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason);
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error);
int scoutfs_fence_reason_pending(struct super_block *sb, int reason);
int scoutfs_fence_free(struct super_block *sb, u64 rid);
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies);

int scoutfs_fence_setup(struct super_block *sb);
void scoutfs_fence_stop(struct super_block *sb);
void scoutfs_fence_destroy(struct super_block *sb);

#endif

@@ -26,6 +26,7 @@
#include "hash.h"
#include "srch.h"
#include "counters.h"
#include "xattr.h"
#include "scoutfs_trace.h"

/*
@@ -37,9 +38,9 @@
 *
 * The log btrees are modified by multiple transactions over time so
 * there is no consistent ordering relationship between the items in
 * different btrees. Each item in a log btree stores a version number
 * for the item. Readers check log btrees for the most recent version
 * that they should use.
 * different btrees. Each item in a log btree stores a seq for the
 * item. Readers check log btrees for the most recent seq that they
 * should use.
 *
 * The item cache reads items in bulk from stable btrees, and writes a
 * transaction's worth of dirty items into the item log btree.
@@ -52,6 +53,8 @@
 */

struct forest_info {
	struct super_block *sb;

	struct mutex mutex;
	struct scoutfs_alloc *alloc;
	struct scoutfs_block_writer *wri;
@@ -60,6 +63,11 @@ struct forest_info {
	struct mutex srch_mutex;
	struct scoutfs_srch_file srch_file;
	struct scoutfs_block *srch_bl;

	struct workqueue_struct *workq;
	struct delayed_work log_merge_dwork;

	atomic64_t inode_count_delta;
};

#define DECLARE_FOREST_INFO(sb, name) \
@@ -216,25 +224,17 @@ out:
}

struct forest_read_items_data {
	bool is_fs;
	int fic;
	scoutfs_forest_item_cb cb;
	void *cb_arg;
};

static int forest_read_items(struct super_block *sb, struct scoutfs_key *key,
static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u64 seq, u8 flags,
			     void *val, int val_len, void *arg)
{
	struct forest_read_items_data *rid = arg;
	struct scoutfs_log_item_value _liv = {0,};
	struct scoutfs_log_item_value *liv = &_liv;

	if (!rid->is_fs) {
		liv = val;
		val += sizeof(struct scoutfs_log_item_value);
		val_len -= sizeof(struct scoutfs_log_item_value);
	}

	return rid->cb(sb, key, liv, val, val_len, rid->cb_arg);
	return rid->cb(sb, key, seq, flags, val, val_len, rid->fic, rid->cb_arg);
}

/*
@@ -246,19 +246,16 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key,
 * that covers all the blocks. Any keys outside of this range can't be
 * trusted because we didn't visit all the trees to check their items.
 *
 * If we hit stale blocks and retry we can call the callback for
 * duplicate items. This is harmless because the items are stable while
 * the caller holds their cluster lock and the caller has to filter out
 * item versions anyway.
 * We return -ESTALE if we hit stale blocks to give the caller a chance
 * to reset their state and retry with a newer version of the btrees.
 */
int scoutfs_forest_read_items(struct super_block *sb,
			      struct scoutfs_lock *lock,
			      struct scoutfs_key *key,
			      struct scoutfs_key *bloom_key,
			      struct scoutfs_key *start,
			      struct scoutfs_key *end,
			      scoutfs_forest_item_cb cb, void *arg)
{
	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
	struct forest_read_items_data rid = {
		.cb = cb,
		.cb_arg = arg,
@@ -270,31 +267,30 @@ int scoutfs_forest_read_items(struct super_block *sb,
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_block *bl;
	struct scoutfs_key ltk;
	struct scoutfs_key orig_start = *start;
	struct scoutfs_key orig_end = *end;
	int ret;
	int i;

	scoutfs_inc_counter(sb, forest_read_items);
	calc_bloom_nrs(&bloom, &lock->start);
	calc_bloom_nrs(&bloom, bloom_key);

retry:
	ret = scoutfs_client_get_roots(sb, &roots);
	if (ret)
		goto out;

	trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
	refs.fs_ref = roots.fs_root.ref;
	refs.logs_ref = roots.logs_root.ref;

	*start = lock->start;
	*end = lock->end;
	*start = orig_start;
	*end = orig_end;

	/* start with fs root items */
	rid.is_fs = true;
	rid.fic |= FIC_FS_ROOT;
	ret = scoutfs_btree_read_items(sb, &roots.fs_root, key, start, end,
				       forest_read_items, &rid);
	if (ret < 0)
		goto out;
	rid.is_fs = false;
	rid.fic &= ~FIC_FS_ROOT;

	scoutfs_key_init_log_trees(&ltk, 0, 0);
	for (;; scoutfs_key_inc(&ltk)) {
@@ -339,24 +335,40 @@ retry:

		scoutfs_inc_counter(sb, forest_bloom_pass);

		if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED))
			rid.fic |= FIC_FINALIZED;

		ret = scoutfs_btree_read_items(sb, &lt.item_root, key, start,
					       end, forest_read_items, &rid);
		if (ret < 0)
			goto out;

		rid.fic &= ~FIC_FINALIZED;
	}

	ret = 0;
out:
	if (ret == -ESTALE) {
		if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
			return -EIO;
		prev_refs = refs;
		goto retry;
	}

	return ret;
}

/*
 * If the items are deltas then combine the src with the destination
 * value and store the result in the destination.
 *
 * Returns:
 *  -errno: fatal error, no change
 *  0: not delta items, no change
 *  +ve: SCOUTFS_DELTA_ values indicating when dst and/or src can be dropped
 */
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
				  void *src, int src_len)
{
	if (key->sk_zone == SCOUTFS_XATTR_TOTL_ZONE)
		return scoutfs_xattr_combine_totl(dst, dst_len, src, src_len);

	return 0;
}
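To make the return convention above concrete, a hedged sketch of a caller acting on the SCOUTFS_DELTA_ codes that forest.h declares later in this diff; drop_item() stands in for whatever item-cache teardown the real caller performs.

	/* hedged sketch of consuming the combine result */
	ret = scoutfs_forest_combine_deltas(&key, dst_val, dst_len, src_val, src_len);
	if (ret < 0)
		return ret;			/* fatal, nothing changed */
	if (ret == SCOUTFS_DELTA_COMBINED) {
		drop_item(src);			/* src folded into dst */
	} else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
		drop_item(src);			/* combined value carries no data */
		drop_item(dst);
	}
	/* ret == 0: not delta items, keep both */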

/*
 * Make sure that the bloom bits for the lock's start key are all set in
 * the current log's bloom block. We record the nr of our log tree in
@@ -426,29 +438,29 @@ out:

/*
 * The caller is committing items in the transaction and has found the
 * greatest item version amongst them. We store it in the log_trees root
 * greatest item seq amongst them. We store it in the log_trees root
 * to send to the server.
 */
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers)
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq)
{
	DECLARE_FOREST_INFO(sb, finf);

	finf->our_log.max_item_vers = cpu_to_le64(max_vers);
	finf->our_log.max_item_seq = cpu_to_le64(max_seq);
}

/*
 * The server is calling during setup to find the greatest item version
 * The server is calling during setup to find the greatest item seq
 * amongst all the log tree roots. They have the authoritative current
 * super.
 *
 * Item versions are only used to compare items in log trees, not in the
 * main fs tree. All we have to do is find the greatest version amongst
 * the log_trees so that new locks will have a write_version greater
 * than all the items in the log_trees.
 * Item seqs are only used to compare items in log trees, not in the
 * main fs tree. All we have to do is find the greatest seq amongst the
 * log_trees so that the core seq will have a greater seq than all the
 * items in the log_trees.
 */
int scoutfs_forest_get_max_vers(struct super_block *sb,
				struct scoutfs_super_block *super,
				u64 *vers)
int scoutfs_forest_get_max_seq(struct super_block *sb,
			       struct scoutfs_super_block *super,
			       u64 *seq)
{
	struct scoutfs_log_trees *lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
@@ -456,7 +468,7 @@ int scoutfs_forest_get_max_vers(struct super_block *sb,
	int ret;

	scoutfs_key_init_log_trees(&ltk, 0, 0);
	*vers = 0;
	*seq = 0;

	for (;; scoutfs_key_inc(&ltk)) {
		ret = scoutfs_btree_next(sb, &super->logs_root, &ltk, &iref);
@@ -464,8 +476,7 @@ int scoutfs_forest_get_max_vers(struct super_block *sb,
		if (iref.val_len == sizeof(struct scoutfs_log_trees)) {
			ltk = *iref.key;
			lt = iref.val;
			*vers = max(*vers,
				    le64_to_cpu(lt->max_item_vers));
			*seq = max(*seq, le64_to_cpu(lt->max_item_seq));
		} else {
			ret = -EIO;
		}
@@ -514,6 +525,62 @@ int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id)
	return ret;
}

void scoutfs_forest_inc_inode_count(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	atomic64_inc(&finf->inode_count_delta);
}

void scoutfs_forest_dec_inode_count(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	atomic64_dec(&finf->inode_count_delta);
}

/*
 * Return the total inode count from the super block and all the
 * log_btrees it references. This assumes it's working with a block
 * reference hierarchy that should be fully consistent. If we see
 * ESTALE we've hit persistent corruption.
 */
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
			       u64 *inode_count)
{
	struct scoutfs_log_trees *lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key key;
	int ret;

	*inode_count = le64_to_cpu(super->inode_count);

	scoutfs_key_init_log_trees(&key, 0, 0);
	for (;;) {
		ret = scoutfs_btree_next(sb, &super->logs_root, &key, &iref);
		if (ret == 0) {
			if (iref.val_len == sizeof(*lt)) {
				key = *iref.key;
				scoutfs_key_inc(&key);
				lt = iref.val;
				*inode_count += le64_to_cpu(lt->inode_count_delta);
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret < 0) {
			if (ret == -ENOENT)
				ret = 0;
			else if (ret == -ESTALE)
				ret = -EIO;
			break;
		}
	}

	return ret;
}

/*
 * This is called from transactions as a new transaction opens and is
 * serialized with all writers.
@@ -534,7 +601,7 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
	memset(&finf->our_log, 0, sizeof(finf->our_log));
	finf->our_log.item_root = lt->item_root;
	finf->our_log.bloom_ref = lt->bloom_ref;
	finf->our_log.max_item_vers = lt->max_item_vers;
	finf->our_log.max_item_seq = lt->max_item_seq;
	finf->our_log.rid = lt->rid;
	finf->our_log.nr = lt->nr;
	finf->srch_file = lt->srch_file;
@@ -542,6 +609,8 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
	WARN_ON_ONCE(finf->srch_bl); /* committing should have put the block */
	finf->srch_bl = NULL;

	atomic64_set(&finf->inode_count_delta, le64_to_cpu(lt->inode_count_delta));

	trace_scoutfs_forest_init_our_log(sb, le64_to_cpu(lt->rid),
					  le64_to_cpu(lt->nr),
					  le64_to_cpu(lt->item_root.ref.blkno),
@@ -564,15 +633,137 @@ void scoutfs_forest_get_btrees(struct super_block *sb,
	lt->item_root = finf->our_log.item_root;
	lt->bloom_ref = finf->our_log.bloom_ref;
	lt->srch_file = finf->srch_file;
	lt->max_item_vers = finf->our_log.max_item_vers;
	lt->max_item_seq = finf->our_log.max_item_seq;

	scoutfs_block_put(sb, finf->srch_bl);
	finf->srch_bl = NULL;

	lt->inode_count_delta = cpu_to_le64(atomic64_read(&finf->inode_count_delta));

	trace_scoutfs_forest_prepare_commit(sb, &lt->item_root.ref,
					    &lt->bloom_ref);
}

#define LOG_MERGE_DELAY_MS (5 * MSEC_PER_SEC)

/*
 * Regularly try to get a log merge request from the server. If we get
 * a request we walk the log_trees items to find input trees and pass
 * them to btree_merge. All of our work is done in dirty blocks
 * allocated from available free blocks that the server gave us. If we
 * hit an error then we drop our dirty blocks without writing them and
 * send an error flag to the server so they can reclaim our allocators
 * and ignore the rest of our work.
 */
static void scoutfs_forest_log_merge_worker(struct work_struct *work)
{
	struct forest_info *finf = container_of(work, struct forest_info,
						log_merge_dwork.work);
	struct super_block *sb = finf->sb;
	struct scoutfs_btree_root_head *rhead = NULL;
	struct scoutfs_btree_root_head *tmp;
	struct scoutfs_log_merge_complete comp;
	struct scoutfs_log_merge_request req;
	struct scoutfs_log_trees *lt;
	struct scoutfs_block_writer wri;
	struct scoutfs_alloc alloc;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key next;
	struct scoutfs_key key;
	unsigned long delay;
	LIST_HEAD(inputs);
	int ret;

	ret = scoutfs_client_get_log_merge(sb, &req);
	if (ret < 0)
		goto resched;

	comp.root = req.root;
	comp.start = req.start;
	comp.end = req.end;
	comp.remain = req.end;
	comp.rid = req.rid;
	comp.seq = req.seq;
	comp.flags = 0;

	scoutfs_alloc_init(&alloc, &req.meta_avail, &req.meta_freed);
	scoutfs_block_writer_init(sb, &wri);

	/* find finalized input log trees within the input seq */
	for (scoutfs_key_init_log_trees(&key, 0, 0); ; scoutfs_key_inc(&key)) {

		if (!rhead) {
			rhead = kmalloc(sizeof(*rhead), GFP_NOFS);
			if (!rhead) {
				ret = -ENOMEM;
				goto out;
			}
		}

		ret = scoutfs_btree_next(sb, &req.logs_root, &key, &iref);
		if (ret == 0) {
			if (iref.val_len == sizeof(*lt)) {
				key = *iref.key;
				lt = iref.val;
				if (lt->item_root.ref.blkno != 0 &&
				    (le64_to_cpu(lt->flags) & SCOUTFS_LOG_TREES_FINALIZED) &&
				    (le64_to_cpu(lt->finalize_seq) < le64_to_cpu(req.input_seq))) {
					rhead->root = lt->item_root;
					list_add_tail(&rhead->head, &inputs);
					rhead = NULL;
				}
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret < 0) {
			if (ret == -ENOENT) {
				ret = 0;
				break;
			}
			goto out;
		}
	}

	/* shouldn't be possible, but it's harmless */
	if (list_empty(&inputs)) {
		ret = 0;
		goto out;
	}

	ret = scoutfs_btree_merge(sb, &alloc, &wri, &req.start, &req.end,
				  &next, &comp.root, &inputs,
				  !!(req.flags & cpu_to_le64(SCOUTFS_LOG_MERGE_REQUEST_SUBTREE)),
				  SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT, 10);
	if (ret == -ERANGE) {
		comp.remain = next;
		le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_REMAIN);
		ret = 0;
	}

out:
	scoutfs_alloc_prepare_commit(sb, &alloc, &wri);
	if (ret == 0)
		ret = scoutfs_block_writer_write(sb, &wri);
	scoutfs_block_writer_forget_all(sb, &wri);

	comp.meta_avail = alloc.avail;
	comp.meta_freed = alloc.freed;
	if (ret < 0)
		le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_ERROR);

	ret = scoutfs_client_commit_log_merge(sb, &comp);

	kfree(rhead);
	list_for_each_entry_safe(rhead, tmp, &inputs, head)
		kfree(rhead);

resched:
	delay = ret == 0 ? 0 : msecs_to_jiffies(LOG_MERGE_DELAY_MS);
	queue_delayed_work(finf->workq, &finf->log_merge_dwork, delay);
}

int scoutfs_forest_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -586,10 +777,20 @@ int scoutfs_forest_setup(struct super_block *sb)
	}

	/* the finf fields will be setup as we open a transaction */
	finf->sb = sb;
	mutex_init(&finf->mutex);
	mutex_init(&finf->srch_mutex);

	INIT_DELAYED_WORK(&finf->log_merge_dwork,
			  scoutfs_forest_log_merge_worker);
	sbi->forest_info = finf;

	finf->workq = alloc_workqueue("scoutfs_log_merge", WQ_NON_REENTRANT |
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!finf->workq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;
out:
	if (ret)
@@ -598,6 +799,24 @@ out:
	return 0;
}

void scoutfs_forest_start(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	queue_delayed_work(finf->workq, &finf->log_merge_dwork,
			   msecs_to_jiffies(LOG_MERGE_DELAY_MS));
}

void scoutfs_forest_stop(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	if (finf && finf->workq) {
		cancel_delayed_work_sync(&finf->log_merge_dwork);
		destroy_workqueue(finf->workq);
	}
}

void scoutfs_forest_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -605,6 +824,7 @@ void scoutfs_forest_destroy(struct super_block *sb)

	if (finf) {
		scoutfs_block_put(sb, finf->srch_bl);

		kfree(finf);
		sbi->forest_info = NULL;
	}

@@ -8,29 +8,36 @@ struct scoutfs_block;
#include "btree.h"

/* caller gives an item to the callback */
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb,
				      struct scoutfs_key *key,
				      struct scoutfs_log_item_value *liv,
				      void *val, int val_len, void *arg);
enum {
	FIC_FS_ROOT = (1 << 0),
	FIC_FINALIZED = (1 << 1),
};
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb, struct scoutfs_key *key, u64 seq,
				      u8 flags, void *val, int val_len, int fic, void *arg);

int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
			     struct scoutfs_key *next);
int scoutfs_forest_read_items(struct super_block *sb,
			      struct scoutfs_lock *lock,
			      struct scoutfs_key *key,
			      struct scoutfs_key *bloom_key,
			      struct scoutfs_key *start,
			      struct scoutfs_key *end,
			      scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
				  struct scoutfs_lock *lock);
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers);
int scoutfs_forest_get_max_vers(struct super_block *sb,
				struct scoutfs_super_block *super,
				u64 *vers);
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);
int scoutfs_forest_get_max_seq(struct super_block *sb,
			       struct scoutfs_super_block *super,
			       u64 *seq);
int scoutfs_forest_insert_list(struct super_block *sb,
			       struct scoutfs_btree_item_list *lst);
int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id);

void scoutfs_forest_inc_inode_count(struct super_block *sb);
void scoutfs_forest_dec_inode_count(struct super_block *sb);
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
			       u64 *inode_count);

void scoutfs_forest_init_btrees(struct super_block *sb,
				struct scoutfs_alloc *alloc,
				struct scoutfs_block_writer *wri,
@@ -38,7 +45,15 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
void scoutfs_forest_get_btrees(struct super_block *sb,
			       struct scoutfs_log_trees *lt);

/* > 0 error codes */
#define SCOUTFS_DELTA_COMBINED		1 /* src val was combined, drop src */
#define SCOUTFS_DELTA_COMBINED_NULL	2 /* combined val has no data, drop both */
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
				  void *src, int src_len);

int scoutfs_forest_setup(struct super_block *sb);
void scoutfs_forest_start(struct super_block *sb);
void scoutfs_forest_stop(struct super_block *sb);
void scoutfs_forest_destroy(struct super_block *sb);

#endif
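As a reading aid for the new callback signature above: a hedged sketch of a scoutfs_forest_item_cb implementation that uses the seq/flags/fic arguments, counting deletion items seen in the fs root. It is illustrative only; the flag it tests is the SCOUTFS_ITEM_FLAG_DELETION bit from format.h in this same series.

	/* hedged example callback matching the new typedef */
	static int count_fs_deletions(struct super_block *sb, struct scoutfs_key *key,
				      u64 seq, u8 flags, void *val, int val_len,
				      int fic, void *arg)
	{
		u64 *nr = arg;

		if ((fic & FIC_FS_ROOT) && (flags & SCOUTFS_ITEM_FLAG_DELETION))
			(*nr)++;

		return 0;
	}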

@@ -1,8 +1,15 @@
#ifndef _SCOUTFS_FORMAT_H_
#define _SCOUTFS_FORMAT_H_

#define SCOUTFS_INTEROP_VERSION 0ULL
#define SCOUTFS_INTEROP_VERSION_STR __stringify(0)
/*
 * The format version defines the format of structures on devices,
 * structures that are communicated over the wire, and the protocol
 * behind the structures.
 */
#define SCOUTFS_FORMAT_VERSION_MIN 1
#define SCOUTFS_FORMAT_VERSION_MIN_STR __stringify(SCOUTFS_FORMAT_VERSION_MIN)
#define SCOUTFS_FORMAT_VERSION_MAX 1
#define SCOUTFS_FORMAT_VERSION_MAX_STR __stringify(SCOUTFS_FORMAT_VERSION_MAX)

/* statfs(2) f_type */
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
@@ -168,6 +175,11 @@ struct scoutfs_key {
#define sko_rid _sk_first
#define sko_ino _sk_second

/* xattr totl */
#define skxt_a _sk_first
#define skxt_b _sk_second
#define skxt_c _sk_third

/* inode */
#define ski_ino _sk_first

@@ -195,19 +207,16 @@ struct scoutfs_key {
#define sklt_rid _sk_first
#define sklt_nr _sk_second

/* seqs */
#define skts_trans_seq _sk_first
#define skts_rid _sk_second

/* mounted clients */
#define skmc_rid _sk_first

/* free extents by blkno */
#define skfb_end _sk_second
#define skfb_len _sk_third
/* free extents by len */
#define skfl_neglen _sk_second
#define skfl_blkno _sk_third
#define skfb_end _sk_first
#define skfb_len _sk_second
/* free extents by order */
#define skfo_revord _sk_first
#define skfo_end _sk_second
#define skfo_len _sk_third

struct scoutfs_avl_root {
	__le16 node;
@@ -243,11 +252,15 @@ struct scoutfs_btree_root {
struct scoutfs_btree_item {
	struct scoutfs_avl_node node;
	struct scoutfs_key key;
	__le64 seq;
	__le16 val_off;
	__le16 val_len;
	__u8 __pad[4];
	__u8 flags;
	__u8 __pad[3];
};

#define SCOUTFS_ITEM_FLAG_DELETION (1 << 0)

struct scoutfs_btree_block {
	struct scoutfs_block_header hdr;
	struct scoutfs_avl_root item_root;
@@ -285,9 +298,10 @@ struct scoutfs_alloc_list_head {
	struct scoutfs_block_ref ref;
	__le64 total_nr;
	__le32 first_nr;
	__u8 __pad[4];
	__le32 flags;
};


/*
 * While the main allocator uses extent items in btree blocks, metadata
 * allocations for a single transaction are recorded in arrays in
@@ -316,17 +330,25 @@ struct scoutfs_alloc_list_block {
 */
struct scoutfs_alloc_root {
	__le64 total_len;
	__le32 flags;
	__le32 _pad;
	struct scoutfs_btree_root root;
};

/* Shared by _alloc_list_head and _alloc_root */
#define SCOUTFS_ALLOC_FLAG_LOW (1U << 0)

/* types of allocators, exposed to alloc_detail ioctl */
#define SCOUTFS_ALLOC_OWNER_NONE 0
#define SCOUTFS_ALLOC_OWNER_SERVER 1
#define SCOUTFS_ALLOC_OWNER_MOUNT 2
#define SCOUTFS_ALLOC_OWNER_SRCH 3
#define SCOUTFS_ALLOC_OWNER_LOG_MERGE 4

struct scoutfs_mounted_client_btree_val {
	union scoutfs_inet_addr addr;
	__u8 flags;
	__u8 __pad[7];
};

#define SCOUTFS_MOUNTED_CLIENT_QUORUM (1 << 0)
@@ -427,10 +449,20 @@ struct scoutfs_srch_compact {
/* client -> server: compaction failed */
#define SCOUTFS_SRCH_COMPACT_FLAG_ERROR (1 << 5)

#define SCOUTFS_DATA_ALLOC_MAX_ZONES 1024
#define SCOUTFS_DATA_ALLOC_ZONE_BYTES DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 8)
#define SCOUTFS_DATA_ALLOC_ZONE_LE64S DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 64)

/*
 * XXX I imagine we should rename these now that they've evolved to track
 * all the btrees that clients use during a transaction. It's not just
 * about item logs, it's about clients making changes to trees.
 *
 * @get_trans_seq, @commit_trans_seq: This pair of sequence numbers
 * determines if a transaction is currently open for the mount that owns
 * the log_trees struct. get_trans_seq is advanced by the server as the
 * transaction is opened. The server sets commit_trans_seq equal to
 * get_ as the transaction is committed.
 */

struct scoutfs_log_trees {
	struct scoutfs_alloc_list_head meta_avail;
@@ -440,26 +472,22 @@ struct scoutfs_log_trees {
	struct scoutfs_alloc_root data_avail;
	struct scoutfs_alloc_root data_freed;
	struct scoutfs_srch_file srch_file;
	__le64 max_item_vers;
	__le64 data_alloc_zone_blocks;
	__le64 data_alloc_zones[SCOUTFS_DATA_ALLOC_ZONE_LE64S];
	__le64 inode_count_delta;
	__le64 get_trans_seq;
	__le64 commit_trans_seq;
	__le64 max_item_seq;
	__le64 finalize_seq;
	__le64 rid;
	__le64 nr;
	__le64 flags;
};

struct scoutfs_log_item_value {
	__le64 vers;
	__u8 flags;
	__u8 __pad[7];
	__u8 data[];
};
#define SCOUTFS_LOG_TREES_FINALIZED (1ULL << 0)

/*
 * FS items are limited by the max btree value length with the log item
 * value header.
 */
#define SCOUTFS_MAX_VAL_SIZE \
	(SCOUTFS_BTREE_MAX_VAL_LEN - sizeof(struct scoutfs_log_item_value))

#define SCOUTFS_LOG_ITEM_FLAG_DELETION (1 << 0)
/* FS items are limited by the max btree value length */
#define SCOUTFS_MAX_VAL_SIZE SCOUTFS_BTREE_MAX_VAL_LEN

struct scoutfs_bloom_block {
	struct scoutfs_block_header hdr;
@@ -481,49 +509,122 @@ struct scoutfs_bloom_block {
	 member_sizeof(struct scoutfs_bloom_block, bits[0]) * 8)
#define SCOUTFS_FOREST_BLOOM_FUNC_BITS (SCOUTFS_BLOCK_LG_SHIFT + 3)

/*
 * A private server btree item which records the status of a log merge
 * operation that is in progress.
 */
struct scoutfs_log_merge_status {
	struct scoutfs_key next_range_key;
	__le64 nr_requests;
	__le64 nr_complete;
	__le64 seq;
};

/*
 * A request is sent to the client and stored in a server btree item to
 * record resources that would be reclaimed if the client failed. It
 * has all the inputs needed for the client to perform its portion of a
 * merge.
 */
struct scoutfs_log_merge_request {
	struct scoutfs_alloc_list_head meta_avail;
	struct scoutfs_alloc_list_head meta_freed;
	struct scoutfs_btree_root logs_root;
	struct scoutfs_btree_root root;
	struct scoutfs_key start;
	struct scoutfs_key end;
	__le64 input_seq;
	__le64 rid;
	__le64 seq;
	__le64 flags;
};

/* request root is subtree of fs root at parent, restricted merging modifications */
#define SCOUTFS_LOG_MERGE_REQUEST_SUBTREE (1ULL << 0)

/*
 * The output of a client's merge of log btree items into a subtree
 * rooted at a parent in the fs_root. The client sends it to the
 * server, who stores it in a btree item for later splicing/rebalancing.
 */
struct scoutfs_log_merge_complete {
	struct scoutfs_alloc_list_head meta_avail;
	struct scoutfs_alloc_list_head meta_freed;
	struct scoutfs_btree_root root;
	struct scoutfs_key start;
	struct scoutfs_key end;
	struct scoutfs_key remain;
	__le64 rid;
	__le64 seq;
	__le64 flags;
};

/* merge failed, ignore completion and reclaim stored request */
#define SCOUTFS_LOG_MERGE_COMP_ERROR (1ULL << 0)
/* merge didn't complete range, restart from remain */
#define SCOUTFS_LOG_MERGE_COMP_REMAIN (1ULL << 1)

/*
 * Range items record the ranges of the fs keyspace that still need to
 * be merged. They're added as a merge starts, removed as requests are
 * sent and added back if the request didn't consume its entire range.
 */
struct scoutfs_log_merge_range {
	struct scoutfs_key start;
	struct scoutfs_key end;
};

struct scoutfs_log_merge_freeing {
	struct scoutfs_btree_root root;
	struct scoutfs_key key;
	__le64 seq;
};

/*
 * Keys are first sorted by major key zones.
 */
#define SCOUTFS_INODE_INDEX_ZONE 1
#define SCOUTFS_RID_ZONE 2
#define SCOUTFS_FS_ZONE 3
#define SCOUTFS_LOCK_ZONE 4
#define SCOUTFS_INODE_INDEX_ZONE 4
#define SCOUTFS_ORPHAN_ZONE 8
#define SCOUTFS_XATTR_TOTL_ZONE 12
#define SCOUTFS_FS_ZONE 16
#define SCOUTFS_LOCK_ZONE 20
/* Items only stored in server btrees */
#define SCOUTFS_LOG_TREES_ZONE 6
#define SCOUTFS_TRANS_SEQ_ZONE 7
#define SCOUTFS_MOUNTED_CLIENT_ZONE 8
#define SCOUTFS_SRCH_ZONE 9
#define SCOUTFS_FREE_EXTENT_ZONE 10
#define SCOUTFS_LOG_TREES_ZONE 24
#define SCOUTFS_MOUNTED_CLIENT_ZONE 28
#define SCOUTFS_SRCH_ZONE 32
#define SCOUTFS_FREE_EXTENT_BLKNO_ZONE 36
#define SCOUTFS_FREE_EXTENT_ORDER_ZONE 40
/* Items only stored in log merge server btrees */
#define SCOUTFS_LOG_MERGE_STATUS_ZONE 44
#define SCOUTFS_LOG_MERGE_RANGE_ZONE 48
#define SCOUTFS_LOG_MERGE_REQUEST_ZONE 52
#define SCOUTFS_LOG_MERGE_COMPLETE_ZONE 56
#define SCOUTFS_LOG_MERGE_FREEING_ZONE 60

/* inode index zone */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 2
#define SCOUTFS_INODE_INDEX_NR 3 /* don't forget to update */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 4
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 8

/* rid zone (also used in server alloc btree) */
#define SCOUTFS_ORPHAN_TYPE 1
/* orphan zone, redundant type used for clarity */
#define SCOUTFS_ORPHAN_TYPE 4

/* fs zone */
#define SCOUTFS_INODE_TYPE 1
#define SCOUTFS_XATTR_TYPE 2
#define SCOUTFS_DIRENT_TYPE 3
#define SCOUTFS_READDIR_TYPE 4
#define SCOUTFS_LINK_BACKREF_TYPE 5
#define SCOUTFS_SYMLINK_TYPE 6
#define SCOUTFS_DATA_EXTENT_TYPE 7
#define SCOUTFS_INODE_TYPE 4
#define SCOUTFS_XATTR_TYPE 8
#define SCOUTFS_DIRENT_TYPE 12
#define SCOUTFS_READDIR_TYPE 16
#define SCOUTFS_LINK_BACKREF_TYPE 20
#define SCOUTFS_SYMLINK_TYPE 24
#define SCOUTFS_DATA_EXTENT_TYPE 28

/* lock zone, only ever found in lock ranges, never in persistent items */
#define SCOUTFS_RENAME_TYPE 1
#define SCOUTFS_RENAME_TYPE 4

/* srch zone, only in server btrees */
#define SCOUTFS_SRCH_LOG_TYPE 1
#define SCOUTFS_SRCH_BLOCKS_TYPE 2
#define SCOUTFS_SRCH_PENDING_TYPE 3
#define SCOUTFS_SRCH_BUSY_TYPE 4

/* free extents in allocator btrees in client and server, by blkno or len */
#define SCOUTFS_FREE_EXTENT_BLKNO_TYPE 1
#define SCOUTFS_FREE_EXTENT_LEN_TYPE 2
#define SCOUTFS_SRCH_LOG_TYPE 4
#define SCOUTFS_SRCH_BLOCKS_TYPE 8
#define SCOUTFS_SRCH_PENDING_TYPE 12
#define SCOUTFS_SRCH_BUSY_TYPE 16

/* file data extents have start and len in key */
struct scoutfs_data_extent_val {
@@ -548,6 +649,17 @@ struct scoutfs_xattr {
	__u8 name[];
};

/*
 * .totl. xattrs are mapped to items. The dotted u64s in the xattr name
 * map to the item key. The item value total is the sum of all the
 * xattr values. The item value count records the number of xattrs
 * contributing to the total and is used when combining logged items to
 * determine if totals are being created or destroyed.
 */
struct scoutfs_xattr_totl_val {
	__le64 total;
	__le64 count;
};
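A hedged sketch of the name-to-key mapping the comment above describes, assuming a dotted name of the form "scoutfs.totl.<a>.<b>.<c>"; the exact prefix and parsing are assumptions of this sketch, only the skxt_a/b/c key fields come from this header.

	/* hedged sketch: map a .totl. xattr name's u64s to an item key */
	static int totl_name_to_key(const char *name, struct scoutfs_key *key)
	{
		u64 a, b, c;

		/* "scoutfs.totl." prefix is assumed, not taken from this patch */
		if (sscanf(name, "scoutfs.totl.%llu.%llu.%llu", &a, &b, &c) != 3)
			return -EINVAL;

		key->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
		key->skxt_a = cpu_to_le64(a);
		key->skxt_b = cpu_to_le64(b);
		key->skxt_c = cpu_to_le64(c);
		return 0;
	}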

/* XXX does this exist upstream somewhere? */
#define member_sizeof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
@@ -582,6 +694,12 @@ struct scoutfs_xattr {
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)

/*
 * A newly elected leader will give fencing some time before giving up and
 * shutting down.
 */
#define SCOUTFS_QUORUM_FENCE_TO_MS (15 * MSEC_PER_SEC)

struct scoutfs_quorum_message {
	__le64 fsid;
	__le64 version;
@@ -613,35 +731,76 @@ struct scoutfs_quorum_config {
	} slots[SCOUTFS_QUORUM_MAX_SLOTS];
};

struct scoutfs_quorum_block {
	struct scoutfs_block_header hdr;
	__le64 term;
	__le64 random_write_mark;
	__le64 flags;
	struct scoutfs_quorum_block_event {
		__le64 rid;
		struct scoutfs_timespec ts;
	} write, update_term, set_leader, clear_leader, fenced;
enum {
	SCOUTFS_QUORUM_EVENT_BEGIN,	/* quorum service starting up */
	SCOUTFS_QUORUM_EVENT_TERM,	/* updated persistent term */
	SCOUTFS_QUORUM_EVENT_ELECT,	/* won election */
	SCOUTFS_QUORUM_EVENT_FENCE,	/* server fenced others */
	SCOUTFS_QUORUM_EVENT_STOP,	/* server stopped */
	SCOUTFS_QUORUM_EVENT_END,	/* quorum service shutting down */
	SCOUTFS_QUORUM_EVENT_NR,
};

#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)
struct scoutfs_quorum_block {
	struct scoutfs_block_header hdr;
	__le64 write_nr;
	struct scoutfs_quorum_block_event {
		__le64 write_nr;
		__le64 rid;
		__le64 term;
		struct scoutfs_timespec ts;
	} events[SCOUTFS_QUORUM_EVENT_NR];
};

/*
 * Tunable options that apply to the entire system. They can be set in
 * mkfs or in sysfs files which send an rpc to the server to make the
 * change. The super version defines the options that exist.
 *
 * @set_bits: bits for each 64bit starting offset after set_bits
 * indicate which logical option is set.
 *
 * @data_alloc_zone_blocks: if set, the data device is logically divided
 * into contiguous zones of this many blocks. Data allocation will try
 * and isolate allocated extents for each mount to their own zone. The
 * zone size must be larger than the data alloc high water mark and
 * large enough such that the number of zones is kept within its static
 * limit.
 */
struct scoutfs_volume_options {
	__le64 set_bits;
	__le64 data_alloc_zone_blocks;
	__le64 __future_expansion[63];
};

#define scoutfs_volopt_nr(field) \
	((offsetof(struct scoutfs_volume_options, field) - \
	  (offsetof(struct scoutfs_volume_options, set_bits) + \
	   member_sizeof(struct scoutfs_volume_options, set_bits))) / sizeof(__le64))
#define scoutfs_volopt_bit(field) \
	(1ULL << scoutfs_volopt_nr(field))

#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR \
	scoutfs_volopt_nr(data_alloc_zone_blocks)
#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT \
	scoutfs_volopt_bit(data_alloc_zone_blocks)

#define SCOUTFS_VOLOPT_EXPANSION_BITS \
	(~(scoutfs_volopt_bit(__future_expansion) - 1))
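Working the macro arithmetic by hand: set_bits is the first __le64 in the struct, so for data_alloc_zone_blocks (the field right after it) the offset difference is zero words, giving nr 0 and bit 1ULL << 0. A hedged compile-time check, not present in the source, would pin that down:

	/* hedged sanity check of the volopt macro arithmetic */
	static inline void volopt_macro_sanity(void)
	{
		BUILD_BUG_ON(SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR != 0);
		BUILD_BUG_ON(SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT != (1ULL << 0));
	}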

#define SCOUTFS_FLAG_IS_META_BDEV 0x01

struct scoutfs_super_block {
	struct scoutfs_block_header hdr;
	__le64 id;
	__le64 version;
	__le64 fmt_vers;
	__le64 flags;
	__u8 uuid[SCOUTFS_UUID_BYTES];
	__le64 seq;
	__le64 next_ino;
	__le64 next_trans_seq;
	__le64 inode_count;
	__le64 total_meta_blocks;	/* both static and dynamic */
	__le64 first_meta_blkno;	/* first dynamically allocated */
	__le64 last_meta_blkno;
	__le64 total_data_blocks;
	__le64 first_data_blkno;
	__le64 last_data_blkno;
	struct scoutfs_quorum_config qconf;
	struct scoutfs_alloc_root meta_alloc[2];
	struct scoutfs_alloc_root data_alloc;
@@ -649,9 +808,10 @@ struct scoutfs_super_block {
	struct scoutfs_alloc_list_head server_meta_freed[2];
	struct scoutfs_btree_root fs_root;
	struct scoutfs_btree_root logs_root;
	struct scoutfs_btree_root trans_seqs;
	struct scoutfs_btree_root log_merge;
	struct scoutfs_btree_root mounted_clients;
	struct scoutfs_btree_root srch_root;
	struct scoutfs_volume_options volopt;
};

#define SCOUTFS_ROOT_INO 1
@@ -675,13 +835,6 @@ struct scoutfs_super_block {
 *
 * @offline_blocks: The number of fixed 4k blocks that could be made
 * online by staging.
 *
 * XXX
 * - otime?
 * - compat flags?
 * - version?
 * - generation?
 * - be more careful with rdev?
 */
struct scoutfs_inode {
	__le64 size;
@@ -692,6 +845,7 @@ struct scoutfs_inode {
	__le64 offline_blocks;
	__le64 next_readdir_pos;
	__le64 next_xattr_id;
	__le64 version;
	__le32 nlink;
	__le32 uid;
	__le32 gid;
@@ -701,6 +855,7 @@ struct scoutfs_inode {
	struct scoutfs_timespec atime;
	struct scoutfs_timespec ctime;
	struct scoutfs_timespec mtime;
	struct scoutfs_timespec crtime;
};

#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
@@ -752,14 +907,15 @@ enum scoutfs_dentry_type {
#define SCOUTFS_XATTR_MAX_NAME_LEN 255
#define SCOUTFS_XATTR_MAX_VAL_LEN 65535
#define SCOUTFS_XATTR_MAX_PART_SIZE SCOUTFS_MAX_VAL_SIZE
#define SCOUTFS_XATTR_MAX_TOTL_U64 23 /* octal U64_MAX */

#define SCOUTFS_XATTR_NR_PARTS(name_len, val_len) \
	DIV_ROUND_UP(sizeof(struct scoutfs_xattr) + name_len + val_len, \
		     (unsigned int)SCOUTFS_XATTR_MAX_PART_SIZE)

#define SCOUTFS_LOCK_INODE_GROUP_NR 128
#define SCOUTFS_LOCK_INODE_GROUP_NR 1024
#define SCOUTFS_LOCK_INODE_GROUP_MASK (SCOUTFS_LOCK_INODE_GROUP_NR - 1)
#define SCOUTFS_LOCK_SEQ_GROUP_MASK ((1ULL << 7) - 1)
#define SCOUTFS_LOCK_SEQ_GROUP_MASK ((1ULL << 10) - 1)

/*
 * messages over the wire.
@@ -782,7 +938,7 @@ enum scoutfs_dentry_type {
 */
struct scoutfs_net_greeting {
	__le64 fsid;
	__le64 version;
	__le64 fmt_vers;
	__le64 server_term;
	__le64 rid;
	__le64 flags;
@@ -813,7 +969,6 @@ struct scoutfs_net_greeting {
 * response messages.
 */
struct scoutfs_net_header {
	__le64 clock_sync_id;
	__le64 seq;
	__le64 recv_seq;
	__le64 id;
@@ -833,14 +988,21 @@ enum scoutfs_net_cmd {
	SCOUTFS_NET_CMD_ALLOC_INODES,
	SCOUTFS_NET_CMD_GET_LOG_TREES,
	SCOUTFS_NET_CMD_COMMIT_LOG_TREES,
	SCOUTFS_NET_CMD_SYNC_LOG_TREES,
	SCOUTFS_NET_CMD_GET_ROOTS,
	SCOUTFS_NET_CMD_ADVANCE_SEQ,
	SCOUTFS_NET_CMD_GET_LAST_SEQ,
	SCOUTFS_NET_CMD_LOCK,
	SCOUTFS_NET_CMD_LOCK_RECOVER,
	SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
	SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
	SCOUTFS_NET_CMD_GET_LOG_MERGE,
	SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
	SCOUTFS_NET_CMD_OPEN_INO_MAP,
	SCOUTFS_NET_CMD_GET_VOLOPT,
	SCOUTFS_NET_CMD_SET_VOLOPT,
	SCOUTFS_NET_CMD_CLEAR_VOLOPT,
	SCOUTFS_NET_CMD_RESIZE_DEVICES,
	SCOUTFS_NET_CMD_STATFS,
	SCOUTFS_NET_CMD_FAREWELL,
	SCOUTFS_NET_CMD_UNKNOWN,
};

@@ -883,9 +1045,23 @@ struct scoutfs_net_roots {
	struct scoutfs_btree_root srch_root;
};

struct scoutfs_net_resize_devices {
	__le64 new_total_meta_blocks;
	__le64 new_total_data_blocks;
};

struct scoutfs_net_statfs {
	__u8 uuid[SCOUTFS_UUID_BYTES];
	__le64 free_meta_blocks;
	__le64 total_meta_blocks;
	__le64 free_data_blocks;
	__le64 total_data_blocks;
	__le64 inode_count;
};

struct scoutfs_net_lock {
	struct scoutfs_key key;
	__le64 write_version;
	__le64 write_seq;
	__u8 old_mode;
	__u8 new_mode;
	__u8 __pad[6];
@@ -962,7 +1138,7 @@ enum scoutfs_corruption_sources {

#define SC_NR_LONGS DIV_ROUND_UP(SC_NR_SOURCES, BITS_PER_LONG)

#define SCOUTFS_OPEN_INO_MAP_SHIFT 7
#define SCOUTFS_OPEN_INO_MAP_SHIFT 10
#define SCOUTFS_OPEN_INO_MAP_BITS (1 << SCOUTFS_OPEN_INO_MAP_SHIFT)
#define SCOUTFS_OPEN_INO_MAP_MASK (SCOUTFS_OPEN_INO_MAP_BITS - 1)
#define SCOUTFS_OPEN_INO_MAP_LE64S (SCOUTFS_OPEN_INO_MAP_BITS / 64)
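With the shift raised from 7 to 10, the derived values become 1024 bits, a mask of 1023, and 16 __le64 words. A hedged helper showing the indexing those macros imply; mapping an ino's low bits to its map position is an assumption of this sketch, not taken from the patch.

	/* hedged sketch: locate an ino's bit in the open ino map */
	static inline void open_ino_map_pos(u64 ino, u32 *word, u32 *bit)
	{
		u32 nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;	/* 0..1023 */

		*word = nr / 64;	/* which of the 16 __le64 words */
		*bit = nr % 64;		/* which bit within that word */
	}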

645 kmod/src/inode.c (File diff suppressed because it is too large)
@@ -9,6 +9,8 @@

struct scoutfs_lock;

#define SCOUTFS_INODE_NR_INDICES 2

struct scoutfs_inode_info {
	/* read or initialized for each inode instance */
	u64 ino;
@@ -20,6 +22,7 @@ struct scoutfs_inode_info {
	u64 online_blocks;
	u64 offline_blocks;
	u32 flags;
	struct timespec crtime;

	/*
	 * Protects per-inode extent items, most particularly readers
@@ -37,8 +40,8 @@ struct scoutfs_inode_info {
	 */
	struct mutex item_mutex;
	bool have_item;
	u64 item_majors[SCOUTFS_INODE_INDEX_NR];
	u32 item_minors[SCOUTFS_INODE_INDEX_NR];
	u64 item_majors[SCOUTFS_INODE_NR_INDICES];
	u32 item_minors[SCOUTFS_INODE_NR_INDICES];

	/* updated at on each new lock acquisition */
	atomic64_t last_refreshed;
@@ -49,14 +52,14 @@ struct scoutfs_inode_info {
	struct scoutfs_per_task pt_data_lock;
	struct scoutfs_data_waitq data_waitq;
	struct rw_semaphore xattr_rwsem;
	struct rb_node writeback_node;
	struct list_head writeback_entry;

	struct scoutfs_lock_coverage ino_lock_cov;

	/* drop if i_count hits 0, allows drop while invalidate holds coverage */
	bool drop_invalidated;
	struct llist_node inv_iput_llnode;
	atomic_t inv_iput_count;
	struct llist_node iput_llnode;
	atomic_t iput_count;

	struct inode inode;
};

@@ -75,11 +78,13 @@ struct inode *scoutfs_alloc_inode(struct super_block *sb);
void scoutfs_destroy_inode(struct inode *inode);
int scoutfs_drop_inode(struct inode *inode);
void scoutfs_evict_inode(struct inode *inode);
int scoutfs_orphan_inode(struct inode *inode);
void scoutfs_inode_queue_iput(struct inode *inode);

struct inode *scoutfs_iget(struct super_block *sb, u64 ino);
#define SCOUTFS_IGF_LINKED (1 << 0) /* enoent if nlink == 0 */
struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf);
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);

void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino);
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
				  u32 minor, u64 ino);
int scoutfs_inode_index_start(struct super_block *sb, u64 *seq);
@@ -89,9 +94,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
				    struct list_head *list, u64 ino,
				    umode_t mode);
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
				      struct list_head *list, u64 seq);
				      struct list_head *list, u64 seq, bool allocing);
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
				  bool set_data_seq);
				  bool set_data_seq, bool allocing);
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);

int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
@@ -114,25 +119,24 @@ u64 scoutfs_inode_data_version(struct inode *inode);
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off);
int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock);

int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
			  int flags);
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock);
int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
		    struct kstat *stat);
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);

int scoutfs_scan_orphans(struct super_block *sb);
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino);
int scoutfs_orphan_delete(struct super_block *sb, u64 ino);
int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);
int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);

void scoutfs_inode_queue_writeback(struct inode *inode);
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);

u64 scoutfs_last_ino(struct super_block *sb);

void scoutfs_inode_exit(void);
int scoutfs_inode_init(void);

int scoutfs_inode_setup(struct super_block *sb);
void scoutfs_inode_start(struct super_block *sb);
void scoutfs_inode_orphan_stop(struct super_block *sb);
void scoutfs_inode_flush_iput(struct super_block *sb);
void scoutfs_inode_destroy(struct super_block *sb);

#endif

447 kmod/src/ioctl.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/aio.h>
#include <linux/list_sort.h>

#include "format.h"
#include "key.h"
@@ -38,6 +39,8 @@
#include "hash.h"
#include "srch.h"
#include "alloc.h"
#include "server.h"
#include "counters.h"
#include "scoutfs_trace.h"

/*
@@ -540,19 +543,17 @@ out:
static long scoutfs_ioc_stat_more(struct file *file, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_ioctl_stat_more stm;

	if (get_user(stm.valid_bytes, (__u64 __user *)arg))
		return -EFAULT;

	stm.valid_bytes = min_t(u64, stm.valid_bytes,
				sizeof(struct scoutfs_ioctl_stat_more));
	stm.meta_seq = scoutfs_inode_meta_seq(inode);
	stm.data_seq = scoutfs_inode_data_seq(inode);
	stm.data_version = scoutfs_inode_data_version(inode);
	scoutfs_inode_get_onoff(inode, &stm.online_blocks, &stm.offline_blocks);
	stm.crtime_sec = si->crtime.tv_sec;
	stm.crtime_nsec = si->crtime.tv_nsec;

	if (copy_to_user((void __user *)arg, &stm, stm.valid_bytes))
	if (copy_to_user((void __user *)arg, &stm, sizeof(stm)))
		return -EFAULT;

	return 0;
@@ -616,6 +617,7 @@ static long scoutfs_ioc_data_waiting(struct file *file, unsigned long arg)
static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)
{
	struct inode *inode = file->f_inode;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct scoutfs_ioctl_setattr_more __user *usm = (void __user *)arg;
	struct scoutfs_ioctl_setattr_more sm;
@@ -674,7 +676,7 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)

	/* setting only so we don't see 0 data seq with nonzero data_version */
	set_data_seq = sm.data_version != 0 ? true : false;
	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq);
	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq, false);
	if (ret)
		goto unlock;

@@ -684,6 +686,8 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)
	i_size_write(inode, sm.i_size);
	inode->i_ctime.tv_sec = sm.ctime_sec;
	inode->i_ctime.tv_nsec = sm.ctime_nsec;
	si->crtime.tv_sec = sm.crtime_sec;
	si->crtime.tv_nsec = sm.crtime_nsec;

	scoutfs_update_inode_item(inode, lock, &ind_locks);
	ret = 0;
@@ -866,28 +870,35 @@ static long scoutfs_ioc_statfs_more(struct file *file, unsigned long arg)
{
	struct super_block *sb = file_inode(file)->i_sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
	struct scoutfs_super_block *super;
	struct scoutfs_ioctl_statfs_more sfm;
	int ret;

	if (get_user(sfm.valid_bytes, (__u64 __user *)arg))
		return -EFAULT;
	super = kzalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	if (!super)
		return -ENOMEM;

	ret = scoutfs_read_super(sb, super);
	if (ret)
		goto out;

	sfm.valid_bytes = min_t(u64, sfm.valid_bytes,
				sizeof(struct scoutfs_ioctl_statfs_more));
	sfm.fsid = le64_to_cpu(super->hdr.fsid);
	sfm.rid = sbi->rid;
	sfm.total_meta_blocks = le64_to_cpu(super->total_meta_blocks);
	sfm.total_data_blocks = le64_to_cpu(super->total_data_blocks);
	sfm.reserved_meta_blocks = scoutfs_server_reserved_meta_blocks(sb);

	ret = scoutfs_client_get_last_seq(sb, &sfm.committed_seq);
	if (ret)
		return ret;
		goto out;

	if (copy_to_user((void __user *)arg, &sfm, sfm.valid_bytes))
		return -EFAULT;

	return 0;
	if (copy_to_user((void __user *)arg, &sfm, sizeof(sfm)))
		ret = -EFAULT;
	else
		ret = 0;
out:
	kfree(super);
	return ret;
}

struct copy_alloc_detail_args {
@@ -991,6 +1002,402 @@ out:
	return ret;
}

static long scoutfs_ioc_resize_devices(struct file *file, unsigned long arg)
{
	struct super_block *sb = file_inode(file)->i_sb;
	struct scoutfs_ioctl_resize_devices __user *urd = (void __user *)arg;
	struct scoutfs_ioctl_resize_devices rd;
	struct scoutfs_net_resize_devices nrd;
	int ret;

	if (!(file->f_mode & FMODE_READ)) {
		ret = -EBADF;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	if (copy_from_user(&rd, urd, sizeof(rd))) {
		ret = -EFAULT;
		goto out;
	}

	nrd.new_total_meta_blocks = cpu_to_le64(rd.new_total_meta_blocks);
	nrd.new_total_data_blocks = cpu_to_le64(rd.new_total_data_blocks);

	ret = scoutfs_client_resize_devices(sb, &nrd);
out:
	return ret;
}

struct xattr_total_entry {
	struct rb_node node;
	struct scoutfs_ioctl_xattr_total xt;
	u64 fs_seq;
	u64 fs_total;
	u64 fs_count;
	u64 fin_seq;
	u64 fin_total;
	s64 fin_count;
	u64 log_seq;
	u64 log_total;
	s64 log_count;
};

static int cmp_xt_entry_name(const struct xattr_total_entry *a,
			     const struct xattr_total_entry *b)
{
	return scoutfs_cmp_u64s(a->xt.name[0], b->xt.name[0]) ?:
	       scoutfs_cmp_u64s(a->xt.name[1], b->xt.name[1]) ?:
	       scoutfs_cmp_u64s(a->xt.name[2], b->xt.name[2]);
}

/*
|
||||
* Record the contribution of the three classes of logged items we can
|
||||
* see: the item in the fs_root, items from finalized log btrees, and
|
||||
* items from active log btrees. Once we have the full set the caller
|
||||
* can decide which of the items contribute to the total it sends to the
|
||||
* user.
|
||||
*/
|
||||
static int read_xattr_total_item(struct super_block *sb, struct scoutfs_key *key,
|
||||
u64 seq, u8 flags, void *val, int val_len, int fic, void *arg)
|
||||
{
|
||||
struct scoutfs_xattr_totl_val *tval = val;
|
||||
struct xattr_total_entry *ent;
|
||||
struct xattr_total_entry rd;
|
||||
struct rb_root *root = arg;
|
||||
struct rb_node *parent;
|
||||
struct rb_node **node;
|
||||
int cmp;
|
||||
|
||||
rd.xt.name[0] = le64_to_cpu(key->skxt_a);
|
||||
rd.xt.name[1] = le64_to_cpu(key->skxt_b);
|
||||
rd.xt.name[2] = le64_to_cpu(key->skxt_c);
|
||||
|
||||
/* find entry matching name */
|
||||
node = &root->rb_node;
|
||||
parent = NULL;
|
||||
cmp = -1;
|
||||
while (*node) {
|
||||
parent = *node;
|
||||
ent = container_of(*node, struct xattr_total_entry, node);
|
||||
|
||||
/* sort merge items by key then newest to oldest */
|
||||
cmp = cmp_xt_entry_name(&rd, ent);
|
||||
if (cmp < 0)
|
||||
node = &(*node)->rb_left;
|
||||
else if (cmp > 0)
|
||||
node = &(*node)->rb_right;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
/* allocate and insert new node if we need to */
|
||||
if (cmp != 0) {
|
||||
ent = kzalloc(sizeof(*ent), GFP_KERNEL);
|
||||
if (!ent)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(&ent->xt.name, &rd.xt.name, sizeof(ent->xt.name));
|
||||
|
||||
rb_link_node(&ent->node, parent, node);
|
||||
rb_insert_color(&ent->node, root);
|
||||
}
|
||||
|
||||
if (fic & FIC_FS_ROOT) {
|
||||
ent->fs_seq = seq;
|
||||
ent->fs_total = le64_to_cpu(tval->total);
|
||||
ent->fs_count = le64_to_cpu(tval->count);
|
||||
} else if (fic & FIC_FINALIZED) {
|
||||
ent->fin_seq = seq;
|
||||
ent->fin_total += le64_to_cpu(tval->total);
|
||||
ent->fin_count += le64_to_cpu(tval->count);
|
||||
} else {
|
||||
ent->log_seq = seq;
|
||||
ent->log_total += le64_to_cpu(tval->total);
|
||||
ent->log_count += le64_to_cpu(tval->count);
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, totl_read_item);
|
||||
|
||||
return 0;
|
||||
}

/* these are always _safe, node stores next */
#define for_each_xt_ent(ent, node, root)                                \
        for (node = rb_first(root);                                     \
             node && (ent = rb_entry(node, struct xattr_total_entry, node), \
                      node = rb_next(node), 1); )

#define for_each_xt_ent_reverse(ent, node, root)                        \
        for (node = rb_last(root);                                      \
             node && (ent = rb_entry(node, struct xattr_total_entry, node), \
                      node = rb_prev(node), 1); )

static void free_xt_ent(struct rb_root *root, struct xattr_total_entry *ent)
{
        rb_erase(&ent->node, root);
        kfree(ent);
}

static void free_all_xt_ents(struct rb_root *root)
{
        struct xattr_total_entry *ent;
        struct rb_node *node;

        for_each_xt_ent(ent, node, root)
                free_xt_ent(root, ent);
}

/*
 * Starting from the caller's pos_name, copy the names, totals, and
 * counts for the .totl. tagged xattrs in the system sorted by their
 * name until the user's buffer is full.  This only sees xattrs that
 * have been committed.  It doesn't use locking to force commits and
 * block writers so it can be a little bit out of date with respect to
 * dirty xattrs in memory across the system.
 *
 * Our reader has to be careful because the log btree merging code can
 * write partial results to the fs_root.  This means that a reader can
 * see both cases where new finalized logs should be applied to the old
 * fs items and where old finalized logs have already been applied to
 * the partially merged fs items.  Currently active logged items are
 * always applied on top of all cases.
 *
 * These cases are differentiated with a combination of sequence numbers
 * in items, the count of contributing xattrs, and a flag
 * differentiating finalized and active logged items.  This lets us
 * recognize all cases, including when finalized logs were merged and
 * deleted the fs item.
 *
 * We're allocating a tracking struct for each totl name we see while
 * traversing the item btrees.  The forest reader is providing the items
 * it finds in leaf blocks that contain the search key.  In the worst
 * case all of these blocks are full and none of the items overlap.  At
 * most, figure order a thousand names per mount.  But in practice many
 * of these factors fall away: leaf blocks aren't full, leaf items
 * overlap, there aren't finalized log btrees, and not all mounts are
 * actively changing totals.  We're much more likely to only read a
 * leaf block's worth of totals that have been long since merged into
 * the fs_root.
 */
static long scoutfs_ioc_read_xattr_totals(struct file *file, unsigned long arg)
{
        struct super_block *sb = file_inode(file)->i_sb;
        struct scoutfs_ioctl_read_xattr_totals __user *urxt = (void __user *)arg;
        struct scoutfs_ioctl_read_xattr_totals rxt;
        struct scoutfs_ioctl_xattr_total __user *uxt;
        struct xattr_total_entry *ent;
        struct scoutfs_key key;
        struct scoutfs_key bloom_key;
        struct scoutfs_key start;
        struct scoutfs_key end;
        struct rb_root root = RB_ROOT;
        struct rb_node *node;
        int count = 0;
        int ret;

        if (!(file->f_mode & FMODE_READ)) {
                ret = -EBADF;
                goto out;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto out;
        }

        if (copy_from_user(&rxt, urxt, sizeof(rxt))) {
                ret = -EFAULT;
                goto out;
        }
        uxt = (void __user *)rxt.totals_ptr;

        if ((rxt.totals_ptr & (sizeof(__u64) - 1)) ||
            (rxt.totals_bytes < sizeof(struct scoutfs_ioctl_xattr_total))) {
                ret = -EINVAL;
                goto out;
        }

        scoutfs_key_set_zeros(&bloom_key);
        bloom_key.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
        scoutfs_xattr_init_totl_key(&start, rxt.pos_name);

        while (rxt.totals_bytes >= sizeof(struct scoutfs_ioctl_xattr_total)) {

                scoutfs_key_set_ones(&end);
                end.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
                if (scoutfs_key_compare(&start, &end) > 0)
                        break;

                key = start;
                ret = scoutfs_forest_read_items(sb, &key, &bloom_key, &start, &end,
                                                read_xattr_total_item, &root);
                if (ret < 0) {
                        if (ret == -ESTALE) {
                                free_all_xt_ents(&root);
                                continue;
                        }
                        goto out;
                }

                if (RB_EMPTY_ROOT(&root))
                        break;

                /* trim totals that fall outside of the consistent range */
                for_each_xt_ent(ent, node, &root) {
                        scoutfs_xattr_init_totl_key(&key, ent->xt.name);
                        if (scoutfs_key_compare(&key, &start) < 0) {
                                free_xt_ent(&root, ent);
                        } else {
                                break;
                        }
                }
                for_each_xt_ent_reverse(ent, node, &root) {
                        scoutfs_xattr_init_totl_key(&key, ent->xt.name);
                        if (scoutfs_key_compare(&key, &end) > 0) {
                                free_xt_ent(&root, ent);
                        } else {
                                break;
                        }
                }

                /* copy resulting unique non-zero totals to userspace */
                for_each_xt_ent(ent, node, &root) {
                        if (rxt.totals_bytes < sizeof(ent->xt))
                                break;

                        /* start with the fs item if we have it */
                        if (ent->fs_seq != 0) {
                                ent->xt.total = ent->fs_total;
                                ent->xt.count = ent->fs_count;
                                scoutfs_inc_counter(sb, totl_read_fs);
                        }

                        /* apply finalized logs if they're newer or creating */
                        if (((ent->fs_seq != 0) && (ent->fin_seq > ent->fs_seq)) ||
                            ((ent->fs_seq == 0) && (ent->fin_count > 0))) {
                                ent->xt.total += ent->fin_total;
                                ent->xt.count += ent->fin_count;
                                scoutfs_inc_counter(sb, totl_read_finalized);
                        }

                        /* always apply active logs which must be newer than fs and finalized */
                        if (ent->log_seq > 0) {
                                ent->xt.total += ent->log_total;
                                ent->xt.count += ent->log_count;
                                scoutfs_inc_counter(sb, totl_read_logged);
                        }

                        if (ent->xt.total != 0 || ent->xt.count != 0) {
                                if (copy_to_user(uxt, &ent->xt, sizeof(ent->xt))) {
                                        ret = -EFAULT;
                                        goto out;
                                }

                                uxt++;
                                rxt.totals_bytes -= sizeof(ent->xt);
                                count++;
                                scoutfs_inc_counter(sb, totl_read_copied);
                        }

                        free_xt_ent(&root, ent);
                }

                /* continue after the last possible key read */
                start = end;
                scoutfs_key_inc(&start);
        }

        ret = 0;
out:
        free_all_xt_ents(&root);

        return ret ?: count;
}

static long scoutfs_ioc_get_allocated_inos(struct file *file, unsigned long arg)
{
        struct super_block *sb = file_inode(file)->i_sb;
        struct scoutfs_ioctl_get_allocated_inos __user *ugai = (void __user *)arg;
        struct scoutfs_ioctl_get_allocated_inos gai;
        struct scoutfs_lock *lock = NULL;
        struct scoutfs_key key;
        struct scoutfs_key end;
        u64 __user *uinos;
        u64 bytes;
        u64 ino;
        int nr;
        int ret;

        if (!(file->f_mode & FMODE_READ)) {
                ret = -EBADF;
                goto out;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto out;
        }

        if (copy_from_user(&gai, ugai, sizeof(gai))) {
                ret = -EFAULT;
                goto out;
        }

        if ((gai.inos_ptr & (sizeof(__u64) - 1)) || (gai.inos_bytes < sizeof(__u64))) {
                ret = -EINVAL;
                goto out;
        }

        scoutfs_inode_init_key(&key, gai.start_ino);
        scoutfs_inode_init_key(&end, gai.start_ino | SCOUTFS_LOCK_INODE_GROUP_MASK);
        uinos = (void __user *)gai.inos_ptr;
        bytes = gai.inos_bytes;
        nr = 0;

        ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_READ, 0, gai.start_ino, &lock);
        if (ret < 0)
                goto out;

        while (bytes >= sizeof(*uinos)) {

                ret = scoutfs_item_next(sb, &key, &end, NULL, 0, lock);
                if (ret < 0) {
                        if (ret == -ENOENT)
                                ret = 0;
                        break;
                }

                if (key.sk_zone != SCOUTFS_FS_ZONE) {
                        ret = 0;
                        break;
                }

                /* all fs items are owned by allocated inodes, and _first is always ino */
                ino = le64_to_cpu(key._sk_first);
                if (put_user(ino, uinos)) {
                        ret = -EFAULT;
                        break;
                }

                uinos++;
                bytes -= sizeof(*uinos);
                if (++nr == INT_MAX)
                        break;

                scoutfs_inode_init_key(&key, ino + 1);
        }

        scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
out:
        return ret ?: nr;
}

long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
@@ -1020,6 +1427,12 @@ long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return scoutfs_ioc_alloc_detail(file, arg);
        case SCOUTFS_IOC_MOVE_BLOCKS:
                return scoutfs_ioc_move_blocks(file, arg);
        case SCOUTFS_IOC_RESIZE_DEVICES:
                return scoutfs_ioc_resize_devices(file, arg);
        case SCOUTFS_IOC_READ_XATTR_TOTALS:
                return scoutfs_ioc_read_xattr_totals(file, arg);
        case SCOUTFS_IOC_GET_ALLOCATED_INOS:
                return scoutfs_ioc_get_allocated_inos(file, arg);
        }

        return -ENOTTY;

kmod/src/ioctl.h (151 lines changed)
@@ -13,8 +13,7 @@
 * This is enforced by pahole scripting in external build environments.
 */

/* XXX I have no idea how these are chosen. */
#define SCOUTFS_IOCTL_MAGIC 's'
#define SCOUTFS_IOCTL_MAGIC 0xE8 /* arbitrarily chosen hole in ioctl-number.rst */

/*
 * Packed scoutfs keys rarely cross the ioctl boundary so we have a
@@ -88,7 +87,7 @@ enum scoutfs_ino_walk_seq_type {
 * Adds entries to the user's buffer for each inode that is found in the
 * given index between the first and last positions.
 */
#define SCOUTFS_IOC_WALK_INODES _IOR(SCOUTFS_IOCTL_MAGIC, 1, \
#define SCOUTFS_IOC_WALK_INODES _IOW(SCOUTFS_IOCTL_MAGIC, 1, \
                                     struct scoutfs_ioctl_walk_inodes)

/*
@@ -167,7 +166,7 @@ struct scoutfs_ioctl_ino_path_result {
};

/* Get a single path from the root to the given inode number */
#define SCOUTFS_IOC_INO_PATH _IOR(SCOUTFS_IOCTL_MAGIC, 2, \
#define SCOUTFS_IOC_INO_PATH _IOW(SCOUTFS_IOCTL_MAGIC, 2, \
                                  struct scoutfs_ioctl_ino_path)

/*
@@ -215,23 +214,16 @@ struct scoutfs_ioctl_stage {
/*
 * Give the user inode fields that are not otherwise visible.  statx()
 * isn't always available and xattrs are relatively expensive.
 *
 * @valid_bytes stores the number of bytes that are valid in the
 * structure.  The caller sets this to the size of the struct that they
 * understand.  The kernel then fills and copies back the min of the
 * sizes that the kernel and the user caller understand.  The user can
 * tell if a field is set if all of its bytes are within the valid_bytes
 * that the kernel set on return.
 *
 * New fields are only added to the end of the struct.
 */
struct scoutfs_ioctl_stat_more {
        __u64 valid_bytes;
        __u64 meta_seq;
        __u64 data_seq;
        __u64 data_version;
        __u64 online_blocks;
        __u64 offline_blocks;
        __u64 crtime_sec;
        __u32 crtime_nsec;
        __u8 _pad[4];
};

#define SCOUTFS_IOC_STAT_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 5, \
@@ -261,13 +253,14 @@ struct scoutfs_ioctl_data_waiting {

#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)

#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
#define SCOUTFS_IOC_DATA_WAITING _IOW(SCOUTFS_IOCTL_MAGIC, 6, \
                                      struct scoutfs_ioctl_data_waiting)

/*
 * If i_size is set then data_version must be non-zero.  If the offline
 * flag is set then i_size must be set and an offline extent will be
 * created from offset 0 to i_size.
 * created from offset 0 to i_size.  The time fields are always applied
 * to the inode.
 */
struct scoutfs_ioctl_setattr_more {
        __u64 data_version;
@@ -275,7 +268,8 @@ struct scoutfs_ioctl_setattr_more {
        __u64 flags;
        __u64 ctime_sec;
        __u32 ctime_nsec;
        __u8 _pad[4];
        __u32 crtime_nsec;
        __u64 crtime_sec;
};

#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
@@ -291,8 +285,8 @@ struct scoutfs_ioctl_listxattr_hidden {
        __u32 hash_pos;
};

#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOR(SCOUTFS_IOCTL_MAGIC, 8, \
                                          struct scoutfs_ioctl_listxattr_hidden)
#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOWR(SCOUTFS_IOCTL_MAGIC, 8, \
                                           struct scoutfs_ioctl_listxattr_hidden)

/*
 * Return the inode numbers of inodes which might contain the given
@@ -345,32 +339,23 @@ struct scoutfs_ioctl_search_xattrs {
/* set in output_flags if returned inodes reached last_ino */
#define SCOUTFS_SEARCH_XATTRS_OFLAG_END (1ULL << 0)

#define SCOUTFS_IOC_SEARCH_XATTRS _IOR(SCOUTFS_IOCTL_MAGIC, 9, \
                                       struct scoutfs_ioctl_search_xattrs)
#define SCOUTFS_IOC_SEARCH_XATTRS _IOW(SCOUTFS_IOCTL_MAGIC, 9, \
                                       struct scoutfs_ioctl_search_xattrs)

/*
 * Give the user information about the filesystem.
 *
 * @valid_bytes stores the number of bytes that are valid in the
 * structure.  The caller sets this to the size of the struct that they
 * understand.  The kernel then fills and copies back the min of the
 * sizes that the kernel and the user caller understand.  The user can
 * tell if a field is set if all of its bytes are within the valid_bytes
 * that the kernel set on return.
 *
 * @committed_seq: All seqs up to and including this seq have been
 * committed.  Can be compared with meta_seq and data_seq from inodes in
 * stat_more to discover if changes have been committed to disk.
 *
 * New fields are only added to the end of the struct.
 */
struct scoutfs_ioctl_statfs_more {
        __u64 valid_bytes;
        __u64 fsid;
        __u64 rid;
        __u64 committed_seq;
        __u64 total_meta_blocks;
        __u64 total_data_blocks;
        __u64 reserved_meta_blocks;
};

#define SCOUTFS_IOC_STATFS_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 10, \
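[Editor's illustration, not part of the diff: a minimal userspace sketch of the valid_bytes negotiation described above. It assumes only this header and an fd on a scoutfs mount; the helper name and output are made up. The same pattern applies to SCOUTFS_IOC_STAT_MORE.]

        #include <stddef.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>

        static int print_committed_seq(int fd)
        {
                struct scoutfs_ioctl_statfs_more sfm;

                memset(&sfm, 0, sizeof(sfm));
                /* tell the kernel how much of the struct we understand */
                sfm.valid_bytes = sizeof(sfm);

                if (ioctl(fd, SCOUTFS_IOC_STATFS_MORE, &sfm) < 0)
                        return -1;

                /* a field is only set if it falls entirely within valid_bytes */
                if (sfm.valid_bytes >= offsetof(struct scoutfs_ioctl_statfs_more,
                                                committed_seq) + sizeof(sfm.committed_seq))
                        printf("committed_seq %llu\n",
                               (unsigned long long)sfm.committed_seq);
                return 0;
        }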
@@ -391,7 +376,7 @@ struct scoutfs_ioctl_data_wait_err {
        __s64 err;
};

#define SCOUTFS_IOC_DATA_WAIT_ERR _IOR(SCOUTFS_IOCTL_MAGIC, 11, \
#define SCOUTFS_IOC_DATA_WAIT_ERR _IOW(SCOUTFS_IOCTL_MAGIC, 11, \
                                       struct scoutfs_ioctl_data_wait_err)

@@ -410,7 +395,7 @@ struct scoutfs_ioctl_alloc_detail_entry {
        __u8 __pad[6];
};

#define SCOUTFS_IOC_ALLOC_DETAIL _IOR(SCOUTFS_IOCTL_MAGIC, 12, \
#define SCOUTFS_IOC_ALLOC_DETAIL _IOW(SCOUTFS_IOCTL_MAGIC, 12, \
                                      struct scoutfs_ioctl_alloc_detail)

/*
@@ -473,7 +458,105 @@ struct scoutfs_ioctl_move_blocks {
        __u64 flags;
};

#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \
#define SCOUTFS_IOC_MOVE_BLOCKS _IOW(SCOUTFS_IOCTL_MAGIC, 13, \
                                     struct scoutfs_ioctl_move_blocks)

struct scoutfs_ioctl_resize_devices {
        __u64 new_total_meta_blocks;
        __u64 new_total_data_blocks;
};

#define SCOUTFS_IOC_RESIZE_DEVICES \
        _IOW(SCOUTFS_IOCTL_MAGIC, 14, struct scoutfs_ioctl_resize_devices)
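[Editor's illustration, not part of the diff: invoking the resize after the underlying devices have grown. The new counts are absolute totals in scoutfs block units, not deltas; the values and helper name are hypothetical.]

        #include <sys/ioctl.h>

        static int grow_devices(int fd, __u64 total_meta, __u64 total_data)
        {
                struct scoutfs_ioctl_resize_devices rd = {
                        .new_total_meta_blocks = total_meta,
                        .new_total_data_blocks = total_data,
                };

                /* needs CAP_SYS_ADMIN; the kernel forwards this to the server */
                return ioctl(fd, SCOUTFS_IOC_RESIZE_DEVICES, &rd);
        }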

#define SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR 3

/*
 * Copy global totals of .totl. xattr value payloads to the user.  This
 * only sees xattrs which have been committed and this doesn't force
 * commits of dirty data throughout the system.  This can be out of sync
 * by the number of xattrs that can be dirty in open transactions that
 * are being built throughout the system.
 *
 * pos_name: The array name of the first total that can be returned.
 * The name is derived from the key of the xattrs that contribute to the
 * total.  For xattrs with a .totl.1.2.3 key, the pos_name[] should be
 * {1, 2, 3}.
 *
 * totals_ptr: An aligned pointer to a buffer that will be filled with
 * an array of scoutfs_ioctl_xattr_total structs for each total copied.
 *
 * totals_bytes: The size of the buffer in bytes.  There must be room
 * for at least one struct element so that returning 0 can promise that
 * there were no more totals to copy after the pos_name.
 *
 * The number of copied elements is returned and 0 is returned if there
 * were no more totals to copy after the pos_name.
 *
 * In addition to the usual errnos (EIO, EINVAL, EPERM, EFAULT) this
 * adds:
 *
 * EINVAL: The totals_ buffer was not aligned or was not large enough
 * for a single struct entry.
 */
struct scoutfs_ioctl_read_xattr_totals {
        __u64 pos_name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
        __u64 totals_ptr;
        __u64 totals_bytes;
};

/*
 * An individual total that is given to userspace.  The total is the
 * sum of all the values in the xattr payloads matching the name.  The
 * count is the number of xattrs, not number of files, contributing to
 * the total.
 */
struct scoutfs_ioctl_xattr_total {
        __u64 name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
        __u64 total;
        __u64 count;
};

#define SCOUTFS_IOC_READ_XATTR_TOTALS \
        _IOW(SCOUTFS_IOCTL_MAGIC, 15, struct scoutfs_ioctl_read_xattr_totals)
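[Editor's illustration, not part of the diff: iterating all committed totals with the inclusive pos_name cursor. Advancing the cursor treats the three u64s as one 192-bit name and increments it past the last name returned; everything here besides the struct and the ioctl number is made up.]

        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>

        static int dump_totals(int fd)
        {
                struct scoutfs_ioctl_xattr_total tots[64];
                struct scoutfs_ioctl_read_xattr_totals rxt;
                long nr;
                long i;

                memset(&rxt, 0, sizeof(rxt));   /* start at name {0, 0, 0} */
                rxt.totals_ptr = (unsigned long)tots;
                rxt.totals_bytes = sizeof(tots);

                while ((nr = ioctl(fd, SCOUTFS_IOC_READ_XATTR_TOTALS, &rxt)) > 0) {
                        for (i = 0; i < nr; i++)
                                printf("%llu.%llu.%llu total %llu count %llu\n",
                                       (unsigned long long)tots[i].name[0],
                                       (unsigned long long)tots[i].name[1],
                                       (unsigned long long)tots[i].name[2],
                                       (unsigned long long)tots[i].total,
                                       (unsigned long long)tots[i].count);

                        /* continue just past the last name we were given */
                        memcpy(rxt.pos_name, tots[nr - 1].name, sizeof(rxt.pos_name));
                        if (++rxt.pos_name[2] == 0 && ++rxt.pos_name[1] == 0)
                                ++rxt.pos_name[0];
                }

                return nr < 0 ? -1 : 0;
        }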

/*
 * This fills the caller's inos array with inode numbers that are in use
 * after the start ino, within an internal inode group.
 *
 * This only makes a promise about the state of the inode numbers within
 * the first and last numbers returned by one call.  At one time, all of
 * those inodes were still allocated.  They could have changed before
 * the call returned.  And any numbers outside of the first and last
 * (or single) are undefined.
 *
 * This doesn't iterate over all allocated inodes, it only probes a
 * single group that the start inode is within.  This interface was
 * first introduced to support tests that needed to find out about a
 * specific inode, while having some other similarly niche uses.  It is
 * unsuitable for a consistent iteration over all the inode numbers in
 * use.
 *
 * This test of inode items doesn't serialize with the inode lifetime
 * mechanism.  It only tells you the numbers of inodes that were once
 * active in the system and haven't yet been fully deleted.  The inode
 * numbers returned could have been in the process of being deleted and
 * were already unreachable even before the call started.
 *
 * @start_ino: the first inode number that could be returned
 * @inos_ptr: pointer to an aligned array of 64bit inode numbers
 * @inos_bytes: the number of bytes available in the inos_ptr array
 *
 * Returns errors or the count of inode numbers returned, quite possibly
 * including 0.
 */
struct scoutfs_ioctl_get_allocated_inos {
        __u64 start_ino;
        __u64 inos_ptr;
        __u64 inos_bytes;
};

#define SCOUTFS_IOC_GET_ALLOCATED_INOS \
        _IOW(SCOUTFS_IOCTL_MAGIC, 16, struct scoutfs_ioctl_get_allocated_inos)
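[Editor's illustration, not part of the diff: probing the inode group that contains start_ino. The return value is the number of inode numbers written, possibly 0; the wrapper is hypothetical.]

        #include <sys/ioctl.h>

        static long probe_inode_group(int fd, __u64 start_ino,
                                      __u64 *inos, unsigned long nr)
        {
                struct scoutfs_ioctl_get_allocated_inos gai = {
                        .start_ino = start_ino,
                        .inos_ptr = (unsigned long)inos,
                        .inos_bytes = nr * sizeof(*inos),
                };

                return ioctl(fd, SCOUTFS_IOC_GET_ALLOCATED_INOS, &gai);
        }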

#endif

kmod/src/item.c (373 lines changed)
@@ -95,7 +95,7 @@ struct item_cache_info {

        /* written by page readers, read by shrink */
        spinlock_t active_lock;
        struct rb_root active_root;
        struct list_head active_list;
};

#define DECLARE_ITEM_CACHE_INFO(sb, name) \
@@ -127,6 +127,7 @@ struct cached_page {
        unsigned long lru_time;
        struct list_head dirty_list;
        struct list_head dirty_head;
        u64 max_seq;
        struct page *page;
        unsigned int page_off;
        unsigned int erased_bytes;
@@ -138,10 +139,11 @@ struct cached_item {
        struct list_head dirty_head;
        unsigned int dirty:1,           /* needs to be written */
                     persistent:1,      /* in btrees, needs deletion item */
                     deletion:1;        /* negative del item for writing */
                     deletion:1,        /* negative del item for writing */
                     delta:1;           /* item values are combined, freed after write */
        unsigned int val_len;
        struct scoutfs_key key;
        struct scoutfs_log_item_value liv;
        u64 seq;
        char val[0];
};

@@ -149,7 +151,8 @@ struct cached_item {

static int item_val_bytes(int val_len)
{
        return round_up(offsetof(struct cached_item, val[val_len]), CACHED_ITEM_ALIGN);
        return round_up(offsetof(struct cached_item, val[val_len]),
                        CACHED_ITEM_ALIGN);
}

/*
@@ -345,7 +348,8 @@ static struct cached_page *alloc_pg(struct super_block *sb, gfp_t gfp)
        page = alloc_page(GFP_NOFS | gfp);
        if (!page || !pg) {
                kfree(pg);
                __free_page(page);
                if (page)
                        __free_page(page);
                return NULL;
        }

@@ -383,6 +387,12 @@ static void put_pg(struct super_block *sb, struct cached_page *pg)
        }
}

static void update_pg_max_seq(struct cached_page *pg, struct cached_item *item)
{
        if (item->seq > pg->max_seq)
                pg->max_seq = item->seq;
}

/*
 * Allocate space for a new item from the free offset at the end of a
 * cached page.  This isn't a blocking allocation, and it's likely that
@@ -390,8 +400,7 @@ static void put_pg(struct super_block *sb, struct cached_page *pg)
 * page or checking the free space first.
 */
static struct cached_item *alloc_item(struct cached_page *pg,
                                      struct scoutfs_key *key,
                                      struct scoutfs_log_item_value *liv,
                                      struct scoutfs_key *key, u64 seq, bool deletion,
                                      void *val, int val_len)
{
        struct cached_item *item;
@@ -406,22 +415,24 @@ static struct cached_item *alloc_item(struct cached_page *pg,
        INIT_LIST_HEAD(&item->dirty_head);
        item->dirty = 0;
        item->persistent = 0;
        item->deletion = !!(liv->flags & SCOUTFS_LOG_ITEM_FLAG_DELETION);
        item->deletion = !!deletion;
        item->delta = 0;
        item->val_len = val_len;
        item->key = *key;
        item->liv = *liv;
        item->seq = seq;

        if (val_len)
                memcpy(item->val, val, val_len);

        update_pg_max_seq(pg, item);

        return item;
}

static void erase_item(struct cached_page *pg, struct cached_item *item)
{
        rbtree_erase(&item->node, &pg->item_root);
        pg->erased_bytes += round_up(item_val_bytes(item->val_len),
                                     CACHED_ITEM_ALIGN);
        pg->erased_bytes += item_val_bytes(item->val_len);
}

static void lru_add(struct super_block *sb, struct item_cache_info *cinf,
@@ -621,6 +632,8 @@ static void mark_item_dirty(struct super_block *sb,
                list_add_tail(&item->dirty_head, &pg->dirty_list);
                item->dirty = 1;
        }

        update_pg_max_seq(pg, item);
}

static void clear_item_dirty(struct super_block *sb,
@@ -672,6 +685,12 @@ static void erase_page_items(struct cached_page *pg,
 * to the dirty list after the left page, and by adding items to the
 * tail of right's dirty list in key sort order.
 *
 * The max_seq of the source page might be larger than the seq of any of
 * its items while it protects an erased item from being reclaimed while
 * an older read is in flight.  We don't know where it might be in the
 * source page so we have to assume that it's in the key range being
 * moved and update the destination page's max_seq accordingly.
 *
 * The caller is responsible for page locking and managing the lru.
 */
static void move_page_items(struct super_block *sb,
@@ -697,7 +716,7 @@ static void move_page_items(struct super_block *sb,
                if (stop && scoutfs_key_compare(&from->key, stop) >= 0)
                        break;

                to = alloc_item(right, &from->key, &from->liv, from->val,
                to = alloc_item(right, &from->key, from->seq, from->deletion, from->val,
                                from->val_len);
                rbtree_insert(&to->node, par, pnode, &right->item_root);
                par = &to->node;
@@ -709,10 +728,13 @@ static void move_page_items(struct super_block *sb,
                }

                to->persistent = from->persistent;
                to->deletion = from->deletion;
                to->delta = from->delta;

                erase_item(left, from);
        }

        if (left->max_seq > right->max_seq)
                right->max_seq = left->max_seq;
}

enum page_intersection_type {
@@ -852,8 +874,7 @@ static void compact_page_items(struct super_block *sb,

        for (from = first_item(&pg->item_root); from; from = next_item(from)) {
                to = page_address(empty->page) + page_off;
                page_off += round_up(item_val_bytes(from->val_len),
                                     CACHED_ITEM_ALIGN);
                page_off += item_val_bytes(from->val_len);

                /* copy the entire item, struct members and all */
                memcpy(to, from, item_val_bytes(from->val_len));
@@ -1260,46 +1281,76 @@ static int cache_empty_page(struct super_block *sb,
        return 0;
}

/*
 * Readers operate independently from dirty items and transactions.
 * They read a set of persistent items and insert them into the cache
 * when there aren't already pages whose key range contains the items.
 * This naturally prefers cached dirty items over stale read items.
 *
 * We have to deal with the case where dirty items are written and
 * invalidated while a read is in flight.  The reader won't have seen
 * the items that were dirty in their persistent roots as they started
 * reading.  By the time they insert their read pages the previously
 * dirty items have been reclaimed and are not in the cache.  The old
 * stale items will be inserted in their place, effectively corrupting
 * the cache by having the dirty items disappear.
 *
 * We fix this by tracking the max seq of items in pages.  As readers
 * start they record the current transaction seq.  Invalidation skips
 * pages with a max seq greater than the first reader seq because the
 * items in the page have to stick around to prevent the reader's stale
 * items from being inserted.
 *
 * This naturally only affects a small set of pages with items that were
 * written relatively recently.  If we're in memory pressure then we
 * probably have a lot of pages and they'll naturally have items that
 * were visible to any readers.  We don't bother with the complicated
 * and expensive further refinement of tracking the ranges that are
 * being read and comparing those with pages to invalidate.
 */
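[Editor's note: the rule above reduces to one predicate, restated here as a sketch; it mirrors the first_active_reader_seq() check that item_lru_shrink() performs below.]

        /* a page is safe to reclaim only if everything in it was already
         * visible when the earliest in-flight reader sampled its seq */
        static bool page_reclaim_safe(u64 first_reader_seq, u64 page_max_seq)
        {
                return page_max_seq < first_reader_seq;
        }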
struct active_reader {
        struct rb_node node;
        struct scoutfs_key start;
        struct scoutfs_key end;
        struct list_head head;
        u64 seq;
};

static struct active_reader *active_rbtree_walk(struct rb_root *root,
                                                struct scoutfs_key *start,
                                                struct scoutfs_key *end,
                                                struct rb_node **par,
                                                struct rb_node ***pnode)
#define INIT_ACTIVE_READER(rdr) \
        struct active_reader rdr = { .head = LIST_HEAD_INIT(rdr.head) }

static void add_active_reader(struct super_block *sb, struct active_reader *active)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);

        BUG_ON(!list_empty(&active->head));

        active->seq = scoutfs_trans_sample_seq(sb);

        spin_lock(&cinf->active_lock);
        list_add_tail(&active->head, &cinf->active_list);
        spin_unlock(&cinf->active_lock);
}

static u64 first_active_reader_seq(struct item_cache_info *cinf)
{
        struct rb_node **node = &root->rb_node;
        struct rb_node *parent = NULL;
        struct active_reader *ret = NULL;
        struct active_reader *active;
        int cmp;
        u64 first;

        while (*node) {
                parent = *node;
                active = container_of(*node, struct active_reader, node);
        /* only the calling task adds or deletes this active */
        spin_lock(&cinf->active_lock);
        active = list_first_entry_or_null(&cinf->active_list, struct active_reader, head);
        first = active ? active->seq : U64_MAX;
        spin_unlock(&cinf->active_lock);

                cmp = scoutfs_key_compare_ranges(start, end, &active->start,
                                                 &active->end);
                if (cmp < 0) {
                        node = &(*node)->rb_left;
                } else if (cmp > 0) {
                        node = &(*node)->rb_right;
                } else {
                        ret = active;
                        node = &(*node)->rb_left;
                }
        return first;
}

static void del_active_reader(struct item_cache_info *cinf, struct active_reader *active)
{
        /* only the calling task adds or deletes this active */
        if (!list_empty(&active->head)) {
                spin_lock(&cinf->active_lock);
                list_del_init(&active->head);
                spin_unlock(&cinf->active_lock);
        }

        if (par)
                *par = parent;
        if (pnode)
                *pnode = node;

        return ret;
}

/*
@@ -1308,16 +1359,16 @@ static struct active_reader *active_rbtree_walk(struct rb_root *root,
 * on our root and aren't in dirty or lru lists.
 *
 * We need to store deletion items here as we read items from all the
 * btrees so that they can override older versions of the items.  The
 * deletion items will be deleted before we insert the pages into the
 * cache.  We don't insert old versions of items into the tree here so
 * that the trees don't have to compare versions.
 * btrees so that they can override older items.  The deletion items
 * will be deleted before we insert the pages into the cache.  We don't
 * insert old versions of items into the tree here so that the trees
 * don't have to compare seqs.
 */
static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
                          struct scoutfs_log_item_value *liv, void *val,
                          int val_len, void *arg)
static int read_page_item(struct super_block *sb, struct scoutfs_key *key, u64 seq, u8 flags,
                          void *val, int val_len, int fic, void *arg)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);
        const bool deletion = !!(flags & SCOUTFS_ITEM_FLAG_DELETION);
        struct rb_root *root = arg;
        struct cached_page *right = NULL;
        struct cached_page *left = NULL;
@@ -1331,7 +1382,7 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,

        pg = page_rbtree_walk(sb, root, key, key, NULL, NULL, &p_par, &p_pnode);
        found = item_rbtree_walk(&pg->item_root, key, NULL, &par, &pnode);
        if (found && (le64_to_cpu(found->liv.vers) >= le64_to_cpu(liv->vers)))
        if (found && (found->seq >= seq))
                return 0;

        if (!page_has_room(pg, val_len)) {
@@ -1345,7 +1396,7 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
                                  &pnode);
        }

        item = alloc_item(pg, key, liv, val, val_len);
        item = alloc_item(pg, key, seq, deletion, val, val_len);
        if (!item) {
                /* simpler split of private pages, no locking/dirty/lru */
                if (!left)
@@ -1368,7 +1419,7 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
        put_pg(sb, pg);

        pg = scoutfs_key_compare(key, &left->end) <= 0 ? left : right;
        item = alloc_item(pg, key, liv, val, val_len);
        item = alloc_item(pg, key, seq, deletion, val, val_len);
        found = item_rbtree_walk(&pg->item_root, key, NULL, &par,
                                 &pnode);

@@ -1399,22 +1450,20 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key,
 * locks held, but without locking the cache.  The regions we read can
 * be stale with respect to the current cache, which can be read and
 * dirtied by other cluster lock holders on our node, but the cluster
 * locks protect the stable items we read.
 * locks protect the stable items we read.  Invalidation is careful not
 * to drop pages that have items that we couldn't see because they were
 * dirty when we started reading.
 *
 * There's also the exciting case where a reader can populate the cache
 * with stale old persistent data which was read before another local
 * cluster lock holder was able to read, dirty, write, and then shrink
 * the cache.  In this case the cache couldn't be cleared by lock
 * invalidation because the caller is actively holding the lock.  But
 * shrinking could evict the cache within the held lock.  So we record
 * that we're an active reader in the range covered by the lock and
 * shrink will refuse to reclaim any pages that intersect with our read.
 * The forest item reader is reading stable trees that could be
 * overwritten.  It can return -ESTALE which we return to the caller who
 * will retry the operation and work with a new set of more recent
 * btrees.
 */
static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
                      struct scoutfs_key *key, struct scoutfs_lock *lock)
{
        struct rb_root root = RB_ROOT;
        struct active_reader active;
        INIT_ACTIVE_READER(active);
        struct cached_page *right = NULL;
        struct cached_page *pg;
        struct cached_page *rd;
@@ -1430,15 +1479,6 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
        int pgi;
        int ret;

        /* stop shrink from freeing new clean data, would let us cache stale */
        active.start = lock->start;
        active.end = lock->end;
        spin_lock(&cinf->active_lock);
        active_rbtree_walk(&cinf->active_root, &active.start, &active.end,
                           &par, &pnode);
        rbtree_insert(&active.node, par, pnode, &cinf->active_root);
        spin_unlock(&cinf->active_lock);

        /* start with an empty page that covers the whole lock */
        pg = alloc_pg(sb, 0);
        if (!pg) {
@@ -1449,8 +1489,12 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
        pg->end = lock->end;
        rbtree_insert(&pg->node, NULL, &root.rb_node, &root);

        ret = scoutfs_forest_read_items(sb, lock, key, &start, &end,
                                        read_page_item, &root);
        /* set active reader seq before reading persistent roots */
        add_active_reader(sb, &active);

        start = lock->start;
        end = lock->end;
        ret = scoutfs_forest_read_items(sb, key, &lock->start, &start, &end, read_page_item, &root);
        if (ret < 0)
                goto out;

@@ -1526,9 +1570,7 @@ retry:

        ret = 0;
out:
        spin_lock(&cinf->active_lock);
        rbtree_erase(&active.node, &cinf->active_root);
        spin_unlock(&cinf->active_lock);
        del_active_reader(cinf, &active);

        /* free any pages we left dangling on error */
        for_each_page_safe(&root, rd, pg_tmp) {
@@ -1587,7 +1629,7 @@ retry:
                                         &lock->end);
        else
                ret = read_pages(sb, cinf, key, lock);
        if (ret < 0)
        if (ret < 0 && ret != -ESTALE)
                goto out;
        goto retry;
}
@@ -1783,6 +1825,21 @@ out:
        return ret;
}
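[Editor's sketch of the -ESTALE contract described above: callers simply retry the read against newer roots. The loop is illustrative only; the real caller above retries via its retry: label.]

        do {
                ret = read_pages(sb, cinf, key, lock);
        } while (ret == -ESTALE);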

/*
 * An item's seq is the greater of the client transaction's seq and the
 * lock's write_seq.  This ensures that multiple commits in one lock
 * grant will have increasing seqs, and new locks in open commits will
 * also increase the seqs.  It lets us limit the inputs of item merging
 * to the last stable seq and ensure that all the items in open
 * transactions and granted locks will have greater seqs.
 */
static u64 item_seq(struct super_block *sb, struct scoutfs_lock *lock)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

        return max(sbi->trans_seq, lock->write_seq);
}

/*
 * Mark the item dirty.  Dirtying while holding a transaction pins the
 * page holding the item and guarantees that the item can be deleted or
@@ -1815,8 +1872,8 @@ int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key *key,
        if (!item || item->deletion) {
                ret = -ENOENT;
        } else {
                item->seq = item_seq(sb, lock);
                mark_item_dirty(sb, cinf, pg, NULL, item);
                item->liv.vers = cpu_to_le64(lock->write_version);
                ret = 0;
        }

@@ -1835,9 +1892,7 @@ static int item_create(struct super_block *sb, struct scoutfs_key *key,
                        int mode, bool force)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);
        struct scoutfs_log_item_value liv = {
                .vers = cpu_to_le64(lock->write_version),
        };
        const u64 seq = item_seq(sb, lock);
        struct cached_item *found;
        struct cached_item *item;
        struct cached_page *pg;
@@ -1865,7 +1920,7 @@ static int item_create(struct super_block *sb, struct scoutfs_key *key,
                goto unlock;
        }

        item = alloc_item(pg, key, &liv, val, val_len);
        item = alloc_item(pg, key, seq, false, val, val_len);
        rbtree_insert(&item->node, par, pnode, &pg->item_root);
        mark_item_dirty(sb, cinf, pg, NULL, item);

@@ -1910,9 +1965,7 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
                        void *val, int val_len, struct scoutfs_lock *lock)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);
        struct scoutfs_log_item_value liv = {
                .vers = cpu_to_le64(lock->write_version),
        };
        const u64 seq = item_seq(sb, lock);
        struct cached_item *item;
        struct cached_item *found;
        struct cached_page *pg;
@@ -1944,12 +1997,13 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
                if (val_len)
                        memcpy(found->val, val, val_len);
                if (val_len < found->val_len)
                        pg->erased_bytes += found->val_len - val_len;
                        pg->erased_bytes += item_val_bytes(found->val_len) -
                                            item_val_bytes(val_len);
                found->val_len = val_len;
                found->liv.vers = liv.vers;
                found->seq = seq;
                mark_item_dirty(sb, cinf, pg, NULL, found);
        } else {
                item = alloc_item(pg, key, &liv, val, val_len);
                item = alloc_item(pg, key, seq, false, val, val_len);
                item->persistent = found->persistent;
                rbtree_insert(&item->node, par, pnode, &pg->item_root);
                mark_item_dirty(sb, cinf, pg, NULL, item);
@@ -1965,6 +2019,77 @@ out:
        return ret;
}

/*
 * Add a delta item.  Delta items are an incremental change relative to
 * the current persistent delta items.  We never have to read the
 * current items so the caller always writes with write only locks.  If
 * combining the current delta item and the caller's item results in a
 * null we can just drop it, we don't have to emit a deletion item.
 */
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
                       void *val, int val_len, struct scoutfs_lock *lock)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);
        const u64 seq = item_seq(sb, lock);
        struct cached_item *item;
        struct cached_page *pg;
        struct rb_node **pnode;
        struct rb_node *par;
        int ret;

        scoutfs_inc_counter(sb, item_delta);

        if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
                goto out;

        ret = scoutfs_forest_set_bloom_bits(sb, lock);
        if (ret < 0)
                goto out;

        ret = get_cached_page(sb, cinf, lock, key, true, true, val_len, &pg);
        if (ret < 0)
                goto out;
        __acquire(pg->rwlock);

        item = item_rbtree_walk(&pg->item_root, key, NULL, &par, &pnode);
        if (item) {
                if (!item->delta) {
                        ret = -EIO;
                        goto unlock;
                }

                ret = scoutfs_forest_combine_deltas(key, item->val, item->val_len, val, val_len);
                if (ret <= 0) {
                        if (ret == 0)
                                ret = -EIO;
                        goto unlock;
                }

                if (ret == SCOUTFS_DELTA_COMBINED) {
                        item->seq = seq;
                        mark_item_dirty(sb, cinf, pg, NULL, item);
                } else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
                        clear_item_dirty(sb, cinf, pg, item);
                        erase_item(pg, item);
                } else {
                        ret = -EIO;
                        goto unlock;
                }
                ret = 0;
        } else {
                item = alloc_item(pg, key, seq, false, val, val_len);
                rbtree_insert(&item->node, par, pnode, &pg->item_root);
                mark_item_dirty(sb, cinf, pg, NULL, item);
                item->delta = 1;
                ret = 0;
        }

unlock:
        write_unlock(&pg->rwlock);
out:
        return ret;
}
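[Editor's sketch, not part of the diff: the shape of a delta combine of the kind scoutfs_forest_combine_deltas() resolves to, shown for a simple additive payload. The value layout is invented; only the return-value contract matches the caller above.]

        struct counter_delta {
                __le64 total;
                __le64 count;
        };

        static int combine_counter_deltas(void *existing, int existing_len,
                                          void *incoming, int incoming_len)
        {
                struct counter_delta *ex = existing;
                struct counter_delta *in = incoming;

                if (existing_len != sizeof(*ex) || incoming_len != sizeof(*in))
                        return -EIO;

                /* accumulate the incoming delta into the cached item's value */
                le64_add_cpu(&ex->total, le64_to_cpu(in->total));
                le64_add_cpu(&ex->count, le64_to_cpu(in->count));

                /* a delta that cancels to nothing can be dropped entirely */
                if (ex->total == 0 && ex->count == 0)
                        return SCOUTFS_DELTA_COMBINED_NULL;

                return SCOUTFS_DELTA_COMBINED;
        }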

/*
 * Delete an item from the cache.  We can leave behind a dirty deletion
 * item if there is a persistent item that needs to be overwritten.
@@ -1977,9 +2102,7 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
                       struct scoutfs_lock *lock, int mode, bool force)
{
        DECLARE_ITEM_CACHE_INFO(sb, cinf);
        struct scoutfs_log_item_value liv = {
                .vers = cpu_to_le64(lock->write_version),
        };
        const u64 seq = item_seq(sb, lock);
        struct cached_item *item;
        struct cached_page *pg;
        struct rb_node **pnode;
@@ -2007,7 +2130,7 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
        }

        if (!item) {
                item = alloc_item(pg, key, &liv, NULL, 0);
                item = alloc_item(pg, key, seq, false, NULL, 0);
                rbtree_insert(&item->node, par, pnode, &pg->item_root);
        }

@@ -2020,10 +2143,10 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
                erase_item(pg, item);
        } else {
                /* must emit deletion to clobber old persistent item */
                item->liv.vers = cpu_to_le64(lock->write_version);
                item->liv.flags |= SCOUTFS_LOG_ITEM_FLAG_DELETION;
                item->seq = seq;
                item->deletion = 1;
                pg->erased_bytes += item->val_len;
                pg->erased_bytes += item_val_bytes(item->val_len) -
                                    item_val_bytes(0);
                item->val_len = 0;
                mark_item_dirty(sb, cinf, pg, NULL, item);
        }
@@ -2106,17 +2229,11 @@ int scoutfs_item_write_dirty(struct super_block *sb)
        struct page *page;
        LIST_HEAD(pages);
        LIST_HEAD(pos);
        u64 max_vers = 0;
        int val_len;
        u64 max_seq = 0;
        int bytes;
        int off;
        int ret;

        /* we're relying on struct layout to prepend item value headers */
        BUILD_BUG_ON(offsetof(struct cached_item, val) !=
                     (offsetof(struct cached_item, liv) +
                      member_sizeof(struct cached_item, liv)));

        if (atomic_read(&cinf->dirty_pages) == 0)
                return 0;

@@ -2168,10 +2285,9 @@ int scoutfs_item_write_dirty(struct super_block *sb)
                list_sort(NULL, &pg->dirty_list, cmp_item_key);

                list_for_each_entry(item, &pg->dirty_list, dirty_head) {
                        val_len = sizeof(item->liv) + item->val_len;
                        bytes = offsetof(struct scoutfs_btree_item_list,
                                         val[val_len]);
                        max_vers = max(max_vers, le64_to_cpu(item->liv.vers));
                                         val[item->val_len]);
                        max_seq = max(max_seq, item->seq);

                        if (off + bytes > PAGE_SIZE) {
                                page = second;
@@ -2187,8 +2303,10 @@ int scoutfs_item_write_dirty(struct super_block *sb)
                        prev = &lst->next;

                        lst->key = item->key;
                        lst->val_len = val_len;
                        memcpy(lst->val, &item->liv, val_len);
                        lst->seq = item->seq;
                        lst->flags = item->deletion ? SCOUTFS_ITEM_FLAG_DELETION : 0;
                        lst->val_len = item->val_len;
                        memcpy(lst->val, item->val, item->val_len);
                }

                spin_lock(&cinf->dirty_lock);
@@ -2201,8 +2319,8 @@ int scoutfs_item_write_dirty(struct super_block *sb)
                read_unlock(&pg->rwlock);
        }

        /* store max item vers in forest's log_trees */
        scoutfs_forest_set_max_vers(sb, max_vers);
        /* store max item seq in forest's log_trees */
        scoutfs_forest_set_max_seq(sb, max_seq);

        /* write all the dirty items into log btree blocks */
        ret = scoutfs_forest_insert_list(sb, first);
@@ -2246,8 +2364,11 @@ retry:
                                 dirty_head) {
                        clear_item_dirty(sb, cinf, pg, item);

                        if (item->delta)
                                scoutfs_inc_counter(sb, item_delta_written);

                        /* free deletion items */
                        if (item->deletion)
                        if (item->deletion || item->delta)
                                erase_item(pg, item);
                        else
                                item->persistent = 1;
@@ -2389,9 +2510,9 @@ retry:

/*
 * Shrink the size of the item cache.  We're operating against the fast
 * path lock ordering and we skip pages if we can't acquire locks.
 * Similarly, we can run into dirty pages or pages which intersect with
 * active readers that we can't shrink and also choose to skip.
 * path lock ordering and we skip pages if we can't acquire locks.  We
 * can run into dirty pages or pages with items that weren't visible to
 * the earliest active reader which must be skipped.
 */
static int item_lru_shrink(struct shrinker *shrink,
                           struct shrink_control *sc)
@@ -2400,26 +2521,24 @@ static int item_lru_shrink(struct shrinker *shrink,
                                                  struct item_cache_info,
                                                  shrinker);
        struct super_block *sb = cinf->sb;
        struct active_reader *active;
        struct cached_page *tmp;
        struct cached_page *pg;
        u64 first_reader_seq;
        int nr;

        if (sc->nr_to_scan == 0)
                goto out;
        nr = sc->nr_to_scan;

        /* can't invalidate pages with items that weren't visible to first reader */
        first_reader_seq = first_active_reader_seq(cinf);

        write_lock(&cinf->rwlock);
        spin_lock(&cinf->lru_lock);

        list_for_each_entry_safe(pg, tmp, &cinf->lru_list, lru_head) {

                /* can't invalidate ranges being read, reader might be stale */
                spin_lock(&cinf->active_lock);
                active = active_rbtree_walk(&cinf->active_root, &pg->start,
                                            &pg->end, NULL, NULL);
                spin_unlock(&cinf->active_lock);
                if (active) {
                if (first_reader_seq <= pg->max_seq) {
                        scoutfs_inc_counter(sb, item_shrink_page_reader);
                        continue;
                }
@@ -2488,7 +2607,7 @@ int scoutfs_item_setup(struct super_block *sb)
        spin_lock_init(&cinf->lru_lock);
        INIT_LIST_HEAD(&cinf->lru_list);
        spin_lock_init(&cinf->active_lock);
        cinf->active_root = RB_ROOT;
        INIT_LIST_HEAD(&cinf->active_list);

        cinf->pcpu_pages = alloc_percpu(struct item_percpu_pages);
        if (!cinf->pcpu_pages)
@@ -2519,7 +2638,7 @@ void scoutfs_item_destroy(struct super_block *sb)
        int cpu;

        if (cinf) {
                BUG_ON(!RB_EMPTY_ROOT(&cinf->active_root));
                BUG_ON(!list_empty(&cinf->active_list));

                unregister_hotcpu_notifier(&cinf->notifier);
                unregister_shrinker(&cinf->shrinker);

@@ -18,6 +18,8 @@ int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
                              struct scoutfs_lock *lock);
int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
                        void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
                       void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delete(struct super_block *sb, struct scoutfs_key *key,
                        struct scoutfs_lock *lock);
int scoutfs_item_delete_force(struct super_block *sb,

@@ -108,6 +108,16 @@ static inline void scoutfs_key_set_ones(struct scoutfs_key *key)
        memset(key->__pad, 0, sizeof(key->__pad));
}

static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
{
        return key->sk_zone == U8_MAX &&
               key->_sk_first == cpu_to_le64(U64_MAX) &&
               key->sk_type == U8_MAX &&
               key->_sk_second == cpu_to_le64(U64_MAX) &&
               key->_sk_third == cpu_to_le64(U64_MAX) &&
               key->_sk_fourth == U8_MAX;
}

/*
 * Return a -1/0/1 comparison of keys.
 *

kmod/src/lock.c (326 lines changed)
@@ -80,15 +80,11 @@ struct lock_info {
|
||||
struct list_head lru_list;
|
||||
unsigned long long lru_nr;
|
||||
struct workqueue_struct *workq;
|
||||
struct work_struct grant_work;
|
||||
struct list_head grant_list;
|
||||
struct work_struct inv_work;
|
||||
struct list_head inv_list;
|
||||
struct work_struct shrink_work;
|
||||
struct list_head shrink_list;
|
||||
atomic64_t next_refresh_gen;
|
||||
struct work_struct inv_iput_work;
|
||||
struct llist_head inv_iput_llist;
|
||||
|
||||
struct dentry *tseq_dentry;
|
||||
struct scoutfs_tseq_tree tseq_tree;
|
||||
@@ -124,34 +120,6 @@ static bool lock_modes_match(int granted, int requested)
|
||||
requested == SCOUTFS_LOCK_READ);
|
||||
}
|
||||
|
||||
/*
|
||||
* Final iput can get into evict and perform final inode deletion which
|
||||
* can delete a lot of items under locks and transactions. We really
|
||||
* don't want to be doing all that in an iput during invalidation. When
|
||||
* invalidation sees that iput might perform final deletion it puts them
|
||||
* on a list and queues this work.
|
||||
*
|
||||
* Nothing stops multiple puts for multiple invalidations of an inode
|
||||
* before the work runs so we can track multiple puts in flight.
|
||||
*/
|
||||
static void lock_inv_iput_worker(struct work_struct *work)
|
||||
{
|
||||
struct lock_info *linfo = container_of(work, struct lock_info, inv_iput_work);
|
||||
struct scoutfs_inode_info *si;
|
||||
struct scoutfs_inode_info *tmp;
|
||||
struct llist_node *inodes;
|
||||
bool more;
|
||||
|
||||
inodes = llist_del_all(&linfo->inv_iput_llist);
|
||||
|
||||
llist_for_each_entry_safe(si, tmp, inodes, inv_iput_llnode) {
|
||||
do {
|
||||
more = atomic_dec_return(&si->inv_iput_count) > 0;
|
||||
iput(&si->inode);
|
||||
} while (more);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate cached data associated with an inode whose lock is going
|
||||
* away.
|
||||
@@ -192,11 +160,8 @@ static void invalidate_inode(struct super_block *sb, u64 ino)
|
||||
if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
|
||||
iput(inode);
|
||||
} else {
|
||||
/* defer iput to work context so we don't evict inodes from invalidation */
|
||||
if (atomic_inc_return(&si->inv_iput_count) == 1)
|
||||
llist_add(&si->inv_iput_llnode, &linfo->inv_iput_llist);
|
||||
smp_wmb(); /* count and list visible before work executes */
|
||||
queue_work(linfo->workq, &linfo->inv_iput_work);
|
||||
/* defer iput to work context so we don't evict inodes from invalidation */
|
||||
scoutfs_inode_queue_iput(inode);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -286,7 +251,6 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
|
||||
BUG_ON(!RB_EMPTY_NODE(&lock->node));
|
||||
BUG_ON(!RB_EMPTY_NODE(&lock->range_node));
|
||||
BUG_ON(!list_empty(&lock->lru_head));
|
||||
BUG_ON(!list_empty(&lock->grant_head));
|
||||
BUG_ON(!list_empty(&lock->inv_head));
|
||||
BUG_ON(!list_empty(&lock->shrink_head));
|
||||
BUG_ON(!list_empty(&lock->cov_list));
|
||||
@@ -314,8 +278,8 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
|
||||
RB_CLEAR_NODE(&lock->node);
|
||||
RB_CLEAR_NODE(&lock->range_node);
|
||||
INIT_LIST_HEAD(&lock->lru_head);
|
||||
INIT_LIST_HEAD(&lock->grant_head);
|
||||
INIT_LIST_HEAD(&lock->inv_head);
|
||||
INIT_LIST_HEAD(&lock->inv_list);
|
||||
INIT_LIST_HEAD(&lock->shrink_head);
|
||||
spin_lock_init(&lock->cov_list_lock);
|
||||
INIT_LIST_HEAD(&lock->cov_list);
|
||||
@@ -578,14 +542,6 @@ static void put_lock(struct lock_info *linfo,struct scoutfs_lock *lock)
|
||||
}
|
||||
}
|
||||
|
||||
static void queue_grant_work(struct lock_info *linfo)
|
||||
{
|
||||
assert_spin_locked(&linfo->lock);
|
||||
|
||||
if (!list_empty(&linfo->grant_list))
|
||||
queue_work(linfo->workq, &linfo->grant_work);
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller has made a change (set a lock mode) which can let one of the
|
||||
* invalidating locks make forward progress.
|
||||
@@ -643,60 +599,13 @@ static void bug_on_inconsistent_grant_cache(struct super_block *sb,
|
||||
}
|
||||
|
||||
/*
|
||||
* Each lock has received a grant response message from the server.
|
||||
* The client is receiving a grant response message from the server.
|
||||
* This is being called synchronously in the networking receive path so
|
||||
* our work should be quick and reasonably non-blocking.
|
||||
*
|
||||
* Grant responses can be reordered with incoming invalidation requests
|
||||
* from the server so we have to be careful to only set the new mode
|
||||
* once the old mode matches.
|
||||
*/
|
||||
static void lock_grant_worker(struct work_struct *work)
|
||||
{
|
||||
struct lock_info *linfo = container_of(work, struct lock_info,
|
||||
grant_work);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_net_lock *nl;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_grant_work);
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
list_for_each_entry_safe(lock, tmp, &linfo->grant_list, grant_head) {
|
||||
nl = &lock->grant_nl;
|
||||
|
||||
/* wait for reordered invalidation to finish */
|
||||
if (lock->mode != nl->old_mode)
|
||||
continue;
|
||||
|
||||
bug_on_inconsistent_grant_cache(sb, lock, nl->old_mode,
|
||||
nl->new_mode);
|
||||
|
||||
if (!lock_mode_can_read(nl->old_mode) &&
|
||||
lock_mode_can_read(nl->new_mode)) {
|
||||
lock->refresh_gen =
|
||||
atomic64_inc_return(&linfo->next_refresh_gen);
|
||||
}
|
||||
|
||||
lock->request_pending = 0;
|
||||
lock->mode = nl->new_mode;
|
||||
lock->write_version = le64_to_cpu(nl->write_version);
|
||||
|
||||
trace_scoutfs_lock_granted(sb, lock);
|
||||
list_del_init(&lock->grant_head);
|
||||
wake_up(&lock->waitq);
|
||||
put_lock(linfo, lock);
|
||||
}
|
||||
|
||||
/* invalidations might be waiting for our reordered grant */
|
||||
queue_inv_work(linfo);
|
||||
spin_unlock(&linfo->lock);
|
||||
}

/*
* The client is receiving a grant response message from the server.  We
* find the lock, record the response, and add it to the list for grant
* work to process.
* The server's state machine can immediately send an invalidate request
* after sending this grant response.  We won't process the incoming
* invalidate request until after processing this grant response.
*/
int scoutfs_lock_grant_response(struct super_block *sb,
struct scoutfs_net_lock *nl)
@@ -714,45 +623,51 @@ int scoutfs_lock_grant_response(struct super_block *sb,
trace_scoutfs_lock_grant_response(sb, lock);
BUG_ON(!lock->request_pending);

lock->grant_nl = *nl;
list_add_tail(&lock->grant_head, &linfo->grant_list);
queue_grant_work(linfo);
bug_on_inconsistent_grant_cache(sb, lock, nl->old_mode, nl->new_mode);

if (!lock_mode_can_read(nl->old_mode) && lock_mode_can_read(nl->new_mode))
lock->refresh_gen = atomic64_inc_return(&linfo->next_refresh_gen);

lock->request_pending = 0;
lock->mode = nl->new_mode;
lock->write_seq = le64_to_cpu(nl->write_seq);

trace_scoutfs_lock_granted(sb, lock);
wake_up(&lock->waitq);
put_lock(linfo, lock);

spin_unlock(&linfo->lock);

return 0;
}
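The ordering this depends on is enforced in the net.c hunk later in this diff: lock commands arriving on a client connection are processed inline in the receive worker instead of being queued, so a grant response is always fully applied before the invalidate request the server sent right behind it. The dispatch, with names taken from that hunk:

    /* in scoutfs_net_recv_worker(): lock client messages keep recv order */
    if (nh.cmd == SCOUTFS_NET_CMD_GREETING ||
        (nh.cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn))
            scoutfs_net_proc_worker(&mrecv->proc_work);  /* synchronous */
    else
            queue_work(conn->workq, &mrecv->proc_work);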

struct inv_req {
struct list_head head;
struct scoutfs_lock *lock;
u64 net_id;
struct scoutfs_net_lock nl;
};

/*
* Each lock has received a lock invalidation request from the server
* which specifies a new mode for the lock.  The server will only send
* one invalidation request at a time for each lock.  The server can
* send another invalidate request after we send the response but before
* we reacquire the lock and finish invalidation.
* which specifies a new mode for the lock.  Our processing state
* machine and server failover and lock recovery can both conspire to
* give us triplicate invalidation requests.  The incoming requests for
* a given lock need to be processed in order, but we can process locks
* in any order.
*
* This is an unsolicited request from the server so it can arrive at
* any time after we make the server aware of the lock by initially
* requesting it.  We wait for users of the current mode to unlock
* before invalidating.
* any time after we make the server aware of the lock.  We wait for
* users of the current mode to unlock before invalidating.
*
* This can arrive on behalf of our request for a mode that conflicts
* with our current mode.  We have to proceed while we have a request
* pending.  We can also be racing with shrink requests being sent while
* we're invalidating.
*
* This can be processed concurrently and experience reordering with a
* grant response sent back-to-back from the server.  We carefully only
* invalidate once the lock mode matches what the server told us to
* invalidate.
*
* Before we start invalidating the lock we set the lock to the new
* mode, preventing further incompatible users of the old mode from
* using the lock while we're invalidating.
*
* This does a lot of serialized inode invalidation in one context and
* performs a lot of repeated calls to sync.  It would be nice to get
* some concurrent inode invalidation and to more carefully only call
* sync when needed.
*/
static void lock_invalidate_worker(struct work_struct *work)
{
@@ -761,8 +676,8 @@ static void lock_invalidate_worker(struct work_struct *work)
struct scoutfs_net_lock *nl;
struct scoutfs_lock *lock;
struct scoutfs_lock *tmp;
struct inv_req *ireq;
LIST_HEAD(ready);
u64 net_id;
int ret;

scoutfs_inc_counter(sb, lock_invalidate_work);
@@ -770,11 +685,8 @@ static void lock_invalidate_worker(struct work_struct *work)
spin_lock(&linfo->lock);

list_for_each_entry_safe(lock, tmp, &linfo->inv_list, inv_head) {
nl = &lock->inv_nl;

/* wait for reordered grant to finish */
if (lock->mode != nl->old_mode)
continue;
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);
nl = &ireq->nl;

/* wait until incompatible holders unlock */
if (!lock_counts_match(nl->new_mode, lock->users))
@@ -794,8 +706,8 @@ static void lock_invalidate_worker(struct work_struct *work)

/* invalidate once the lock is ready */
list_for_each_entry(lock, &ready, inv_head) {
nl = &lock->inv_nl;
net_id = lock->inv_net_id;
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);
nl = &ireq->nl;

/* only lock protocol, inv can't call subsystems after shutdown */
if (!linfo->shutdown) {
@@ -803,11 +715,10 @@ static void lock_invalidate_worker(struct work_struct *work)
BUG_ON(ret);
}

/* allow another request after we respond but before we finish */
lock->inv_net_id = 0;

/* respond with the key and modes from the request */
ret = scoutfs_client_lock_response(sb, net_id, nl);
/* respond with the key and modes from the request, server might have died */
ret = scoutfs_client_lock_response(sb, ireq->net_id, nl);
if (ret == -ENOTCONN)
ret = 0;
BUG_ON(ret);

scoutfs_inc_counter(sb, lock_invalidate_response);
@@ -817,64 +728,87 @@ static void lock_invalidate_worker(struct work_struct *work)
spin_lock(&linfo->lock);

list_for_each_entry_safe(lock, tmp, &ready, inv_head) {
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);

trace_scoutfs_lock_invalidated(sb, lock);
if (lock->inv_net_id == 0) {

list_del(&ireq->head);
kfree(ireq);

if (list_empty(&lock->inv_list)) {
/* finish if another request didn't arrive */
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
wake_up(&lock->waitq);
} else {
/* another request filled nl/net_id, put it back on the list */
/* another request arrived, back on the list and requeue */
list_move_tail(&lock->inv_head, &linfo->inv_list);
queue_inv_work(linfo);
}

put_lock(linfo, lock);
}

/* grant might have been waiting for invalidate request */
queue_grant_work(linfo);
spin_unlock(&linfo->lock);
}

/*
* Record an incoming invalidate request from the server and add its
* lock to the list for processing.  This request can be from a new
* server and racing with invalidation that frees from an old server.
* It's fine to not find the requested lock and send an immediate
* response.
* Add an incoming invalidation request to the end of the list on the
* lock and queue it for blocking invalidation work.  This is being
* called synchronously in the net recv path to avoid reordering with
* grants that were sent immediately before the server sent this
* invalidation.
*
* The invalidation process drops the linfo lock to send responses.  The
* moment it does so we can receive another invalidation request (the
* server can ask us to go from write->read then read->null).  We allow
* for one chain like this but it's a bug if we receive more concurrent
* invalidation requests than that.  The server should be only sending
* one at a time.
* Incoming invalidation requests are a function of the remote lock
* server's state machine and are slightly decoupled from our lock
* state.  We can receive duplicate requests if the server is quick
* enough to send the next request after we send a previous reply, or if
* pending invalidation spans server failover and lock recovery.
*
* Similarly, we can get a request to invalidate a lock we don't have if
* invalidation finished just after lock recovery to a new server.
* Happily we can just reply because we satisfy the invalidation
* response promise to not be using the old lock's mode if the lock
* doesn't exist.
*/
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
struct scoutfs_lock *lock = NULL;
struct inv_req *ireq;
int ret = 0;

scoutfs_inc_counter(sb, lock_invalidate_request);

ireq = kmalloc(sizeof(struct inv_req), GFP_NOFS);
BUG_ON(!ireq); /* lock server doesn't handle response errors */
if (ireq == NULL) {
ret = -ENOMEM;
goto out;
}

spin_lock(&linfo->lock);
lock = get_lock(sb, &nl->key);
if (lock) {
BUG_ON(lock->inv_net_id != 0);
lock->inv_net_id = net_id;
lock->inv_nl = *nl;
if (list_empty(&lock->inv_head)) {
trace_scoutfs_lock_invalidate_request(sb, lock);
ireq->lock = lock;
ireq->net_id = net_id;
ireq->nl = *nl;
if (list_empty(&lock->inv_list)) {
list_add_tail(&lock->inv_head, &linfo->inv_list);
lock->invalidate_pending = 1;
queue_inv_work(linfo);
}
trace_scoutfs_lock_invalidate_request(sb, lock);
queue_inv_work(linfo);
list_add_tail(&ireq->head, &lock->inv_list);
}
spin_unlock(&linfo->lock);

if (!lock)
out:
if (!lock) {
ret = scoutfs_client_lock_response(sb, net_id, nl);
BUG_ON(ret); /* lock server doesn't fence timed out client requests */
}

return ret;
}
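A hypothetical timeline of the duplicate-request handling described above; the modes and net ids are invented for illustration, only the queueing behavior comes from the code:

    /* server asks write->read: an inv_req is queued and work is kicked */
    scoutfs_lock_invalidate_request(sb, 7, &write_to_read);
    /* it asks read->null before we finish: the second inv_req is simply
     * appended behind the first on lock->inv_list */
    scoutfs_lock_invalidate_request(sb, 8, &read_to_null);
    /* lock_invalidate_worker() drains lock->inv_list in order, sending
     * one response per request, so the server sees replies in sequence */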
@@ -912,7 +846,7 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
for (i = 0; lock && i < SCOUTFS_NET_LOCK_MAX_RECOVER_NR; i++) {

nlr->locks[i].key = lock->start;
nlr->locks[i].write_version = cpu_to_le64(lock->write_version);
nlr->locks[i].write_seq = cpu_to_le64(lock->write_seq);
nlr->locks[i].old_mode = lock->mode;
nlr->locks[i].new_mode = lock->mode;

@@ -1052,8 +986,14 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i

trace_scoutfs_lock_wait(sb, lock);

ret = wait_event_interruptible(lock->waitq,
lock_wait_cond(sb, lock, mode));
if (flags & SCOUTFS_LKF_INTERRUPTIBLE) {
ret = wait_event_interruptible(lock->waitq,
lock_wait_cond(sb, lock, mode));
} else {
wait_event(lock->waitq, lock_wait_cond(sb, lock, mode));
ret = 0;
}

spin_lock(&linfo->lock);
if (ret)
break;
@@ -1110,7 +1050,7 @@ int scoutfs_lock_inode(struct super_block *sb, enum scoutfs_lock_mode mode, int
goto out;

if (flags & SCOUTFS_LKF_REFRESH_INODE) {
ret = scoutfs_inode_refresh(inode, *lock, flags);
ret = scoutfs_inode_refresh(inode, *lock);
if (ret < 0) {
scoutfs_unlock(sb, *lock, mode);
*lock = NULL;
@@ -1271,29 +1211,42 @@ int scoutfs_lock_inode_index(struct super_block *sb, enum scoutfs_lock_mode
}

/*
* The rid lock protects a mount's private persistent items in the rid
* zone.  It's held for the duration of the mount.  It lets the mount
* modify the rid items at will and signals to other mounts that we're
* still alive and our rid items shouldn't be reclaimed.
* Orphan items are stored in their own zone, are modified with
* shared write_only locks, and are read inconsistently without locks by
* background scanning work.
*
* Being held for the entire mount prevents other nodes from reclaiming
* our items, like free blocks, when it would make sense for them to be
* able to.  Maybe we have a bunch free and they're trying to allocate
* and are getting ENOSPC.
* Since we only use write_only locks we just lock the entire zone, but
* the api provides the inode in case we ever change the locking scheme.
*/
int scoutfs_lock_rid(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
u64 rid, struct scoutfs_lock **lock)
int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int flags, u64 ino,
struct scoutfs_lock **lock)
{
struct scoutfs_key start;
struct scoutfs_key end;

scoutfs_key_set_zeros(&start);
start.sk_zone = SCOUTFS_RID_ZONE;
start.sko_rid = cpu_to_le64(rid);
start.sk_zone = SCOUTFS_ORPHAN_ZONE;
start.sko_ino = 0;
start.sk_type = SCOUTFS_ORPHAN_TYPE;

scoutfs_key_set_zeros(&end);
end.sk_zone = SCOUTFS_ORPHAN_ZONE;
end.sko_ino = cpu_to_le64(U64_MAX);
end.sk_type = SCOUTFS_ORPHAN_TYPE;

return lock_key_range(sb, mode, flags, &start, &end, lock);
}
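A minimal sketch of a caller of the new orphan lock; the transaction and item manipulation around it are omitted:

    struct scoutfs_lock *orph_lock = NULL;
    int ret;

    ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, &orph_lock);
    if (ret == 0) {
            /* create or delete orphan items for ino under the zone lock */
            scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);
    }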

int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
struct scoutfs_lock **lock)
{
struct scoutfs_key start;
struct scoutfs_key end;

scoutfs_key_set_zeros(&start);
start.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
scoutfs_key_set_ones(&end);
end.sk_zone = SCOUTFS_RID_ZONE;
end.sko_rid = cpu_to_le64(rid);
end.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;

return lock_key_range(sb, mode, flags, &start, &end, lock);
}
@@ -1553,6 +1506,14 @@ void scoutfs_lock_unmount_begin(struct super_block *sb)
}
}

void scoutfs_lock_flush_invalidate(struct super_block *sb)
{
DECLARE_LOCK_INFO(sb, linfo);

if (linfo)
flush_work(&linfo->inv_work);
}
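A plausible unmount-path ordering for the new flush hook, inferred from the declarations in lock.h rather than from a caller shown in this diff:

    scoutfs_lock_unmount_begin(sb);
    scoutfs_lock_flush_invalidate(sb);  /* drain queued invalidation work */
    scoutfs_lock_shutdown(sb);          /* then stop txns and the client */
    scoutfs_lock_destroy(sb);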

/*
* The caller is going to be shutting down transactions and the client.
* We need to make sure that locking won't call either after we return.
@@ -1616,6 +1577,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
struct inv_req *ireq_tmp;
struct inv_req *ireq;
struct rb_node *node;
enum scoutfs_lock_mode mode;

@@ -1658,15 +1621,21 @@ void scoutfs_lock_destroy(struct super_block *sb)
* of free).
*/
spin_lock(&linfo->lock);

node = rb_first(&linfo->lock_tree);
while (node) {
lock = rb_entry(node, struct scoutfs_lock, node);
node = rb_next(node);

list_for_each_entry_safe(ireq, ireq_tmp, &lock->inv_list, head) {
list_del_init(&ireq->head);
put_lock(linfo, ireq->lock);
kfree(ireq);
}

lock->request_pending = 0;
if (!list_empty(&lock->lru_head))
__lock_del_lru(linfo, lock);
if (!list_empty(&lock->grant_head))
list_del_init(&lock->grant_head);
if (!list_empty(&lock->inv_head)) {
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
@@ -1676,6 +1645,7 @@ void scoutfs_lock_destroy(struct super_block *sb)
lock_remove(linfo, lock);
lock_free(linfo, lock);
}

spin_unlock(&linfo->lock);

kfree(linfo);
@@ -1700,15 +1670,11 @@ int scoutfs_lock_setup(struct super_block *sb)
linfo->shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&linfo->shrinker);
INIT_LIST_HEAD(&linfo->lru_list);
INIT_WORK(&linfo->grant_work, lock_grant_worker);
INIT_LIST_HEAD(&linfo->grant_list);
INIT_WORK(&linfo->inv_work, lock_invalidate_worker);
INIT_LIST_HEAD(&linfo->inv_list);
INIT_WORK(&linfo->shrink_work, lock_shrink_worker);
INIT_LIST_HEAD(&linfo->shrink_list);
atomic64_set(&linfo->next_refresh_gen, 0);
INIT_WORK(&linfo->inv_iput_work, lock_inv_iput_worker);
init_llist_head(&linfo->inv_iput_llist);
scoutfs_tseq_tree_init(&linfo->tseq_tree, lock_tseq_show);

sbi->lock_info = linfo;

@@ -6,14 +6,15 @@

#define SCOUTFS_LKF_REFRESH_INODE 0x01 /* update stale inode from item */
#define SCOUTFS_LKF_NONBLOCK 0x02 /* only use already held locks */
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_NONBLOCK << 1) - 1))
#define SCOUTFS_LKF_INTERRUPTIBLE 0x04 /* pending signals return -ERESTARTSYS */
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_INTERRUPTIBLE << 1) - 1))
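Since SCOUTFS_LKF_INTERRUPTIBLE is 0x04, the mask becomes ~0x07, so any bit above the highest defined flag is invalid. A hypothetical argument check built on the mask:

    /* hypothetical helper; rejects flag bits that aren't defined above */
    static int check_lock_flags(int flags)
    {
            if (flags & SCOUTFS_LKF_INVALID)
                    return -EINVAL;
            return 0;
    }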

#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID

struct scoutfs_omap_lock;

/*
* A few fields (start, end, refresh_gen, write_version, granted_mode)
* A few fields (start, end, refresh_gen, write_seq, granted_mode)
* are referenced by code outside lock.c.
*/
struct scoutfs_lock {
@@ -23,18 +24,15 @@ struct scoutfs_lock {
struct rb_node node;
struct rb_node range_node;
u64 refresh_gen;
u64 write_version;
u64 write_seq;
u64 dirty_trans_seq;
struct list_head lru_head;
wait_queue_head_t waitq;
unsigned long request_pending:1,
invalidate_pending:1;

struct list_head grant_head;
struct scoutfs_net_lock grant_nl;
struct list_head inv_head;
struct scoutfs_net_lock inv_nl;
u64 inv_net_id;
struct list_head inv_head; /* entry in linfo's list of locks with invalidations */
struct list_head inv_list; /* list of lock's invalidation requests */
struct list_head shrink_head;

spinlock_t cov_list_lock;
@@ -84,8 +82,10 @@ int scoutfs_lock_inodes(struct super_block *sb, enum scoutfs_lock_mode mode, int
struct inode *d, struct scoutfs_lock **D_lock);
int scoutfs_lock_rename(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
struct scoutfs_lock **lock);
int scoutfs_lock_rid(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
u64 rid, struct scoutfs_lock **lock);
int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
u64 ino, struct scoutfs_lock **lock);
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
struct scoutfs_lock **lock);
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock,
enum scoutfs_lock_mode mode);

@@ -104,6 +104,7 @@ void scoutfs_free_unused_locks(struct super_block *sb);

int scoutfs_lock_setup(struct super_block *sb);
void scoutfs_lock_unmount_begin(struct super_block *sb);
void scoutfs_lock_flush_invalidate(struct super_block *sb);
void scoutfs_lock_shutdown(struct super_block *sb);
void scoutfs_lock_destroy(struct super_block *sb);

@@ -80,11 +80,6 @@ struct lock_server_info {
struct dentry *tseq_dentry;
struct scoutfs_tseq_tree stats_tseq_tree;
struct dentry *stats_tseq_dentry;

struct scoutfs_alloc *alloc;
struct scoutfs_block_writer *wri;

atomic64_t write_version;
};

#define DECLARE_LOCK_SERVER_INFO(sb, name) \
@@ -158,30 +153,30 @@ enum {
*/
static void add_client_entry(struct server_lock_node *snode,
struct list_head *list,
struct client_lock_entry *clent)
struct client_lock_entry *c_ent)
{
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));

if (list_empty(&clent->head))
list_add_tail(&clent->head, list);
if (list_empty(&c_ent->head))
list_add_tail(&c_ent->head, list);
else
list_move_tail(&clent->head, list);
list_move_tail(&c_ent->head, list);

clent->on_list = list == &snode->granted ? OL_GRANTED :
c_ent->on_list = list == &snode->granted ? OL_GRANTED :
list == &snode->requested ? OL_REQUESTED :
OL_INVALIDATED;
}

static void free_client_entry(struct lock_server_info *inf,
struct server_lock_node *snode,
struct client_lock_entry *clent)
struct client_lock_entry *c_ent)
{
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));

if (!list_empty(&clent->head))
list_del_init(&clent->head);
scoutfs_tseq_del(&inf->tseq_tree, &clent->tseq_entry);
kfree(clent);
if (!list_empty(&c_ent->head))
list_del_init(&c_ent->head);
scoutfs_tseq_del(&inf->tseq_tree, &c_ent->tseq_entry);
kfree(c_ent);
}

static bool invalid_mode(u8 mode)
@@ -344,13 +339,13 @@ static struct client_lock_entry *find_entry(struct server_lock_node *snode,
struct list_head *list,
u64 rid)
{
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;

WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));

list_for_each_entry(clent, list, head) {
if (clent->rid == rid)
return clent;
list_for_each_entry(c_ent, list, head) {
if (c_ent->rid == rid)
return c_ent;
}

return NULL;
@@ -369,7 +364,7 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
u64 net_id, struct scoutfs_net_lock *nl)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;
struct server_lock_node *snode;
int ret;

@@ -381,29 +376,29 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
goto out;
}

clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
if (!clent) {
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
if (!c_ent) {
ret = -ENOMEM;
goto out;
}

INIT_LIST_HEAD(&clent->head);
clent->rid = rid;
clent->net_id = net_id;
clent->mode = nl->new_mode;
INIT_LIST_HEAD(&c_ent->head);
c_ent->rid = rid;
c_ent->net_id = net_id;
c_ent->mode = nl->new_mode;

snode = alloc_server_lock(inf, &nl->key);
if (snode == NULL) {
kfree(clent);
kfree(c_ent);
ret = -ENOMEM;
goto out;
}

snode->stats[SLT_REQUEST]++;

clent->snode = snode;
add_client_entry(snode, &snode->requested, clent);
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
c_ent->snode = snode;
add_client_entry(snode, &snode->requested, c_ent);
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);

ret = process_waiting_requests(sb, snode);
out:
@@ -422,7 +417,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;
struct server_lock_node *snode;
int ret;

@@ -443,18 +438,18 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,

snode->stats[SLT_RESPONSE]++;

clent = find_entry(snode, &snode->invalidated, rid);
if (!clent) {
c_ent = find_entry(snode, &snode->invalidated, rid);
if (!c_ent) {
put_server_lock(inf, snode);
ret = -EINVAL;
goto out;
}

if (nl->new_mode == SCOUTFS_LOCK_NULL) {
free_client_entry(inf, snode, clent);
free_client_entry(inf, snode, c_ent);
} else {
clent->mode = nl->new_mode;
add_client_entry(snode, &snode->granted, clent);
c_ent->mode = nl->new_mode;
add_client_entry(snode, &snode->granted, c_ent);
}

ret = process_waiting_requests(sb, snode);
@@ -492,14 +487,14 @@ static int process_waiting_requests(struct super_block *sb,
struct client_lock_entry *req_tmp;
struct client_lock_entry *gr;
struct client_lock_entry *gr_tmp;
u64 wv;
u64 seq;
int ret;

BUG_ON(!mutex_is_locked(&snode->mutex));

/* processing waits for all invalidation responses or recovery */
if (!list_empty(&snode->invalidated) ||
scoutfs_recov_next_pending(sb, SCOUTFS_RECOV_LOCKS) != 0) {
scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_LOCKS) != 0) {
ret = 0;
goto out;
}
@@ -534,6 +529,7 @@ static int process_waiting_requests(struct super_block *sb,

nl.key = snode->key;
nl.new_mode = req->mode;
nl.write_seq = 0;

/* see if there's an existing compatible grant to replace */
gr = find_entry(snode, &snode->granted, req->rid);
@@ -546,8 +542,9 @@ static int process_waiting_requests(struct super_block *sb,

if (nl.new_mode == SCOUTFS_LOCK_WRITE ||
nl.new_mode == SCOUTFS_LOCK_WRITE_ONLY) {
wv = atomic64_inc_return(&inf->write_version);
nl.write_version = cpu_to_le64(wv);
/* doesn't commit seq update, recovered with locks */
seq = scoutfs_server_next_seq(sb);
nl.write_seq = cpu_to_le64(seq);
}

ret = scoutfs_server_lock_response(sb, req->rid,
@@ -624,14 +621,6 @@ int scoutfs_lock_server_finished_recovery(struct super_block *sb)
return ret;
}

static void set_max_write_version(struct lock_server_info *inf, u64 new)
{
u64 old;

while (new > (old = atomic64_read(&inf->write_version)) &&
(atomic64_cmpxchg(&inf->write_version, old, new) != old));
}

/*
* We sent a lock recover request to the client when we received its
* greeting while in recovery.  Here we instantiate all the locks it
@@ -643,7 +632,7 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct client_lock_entry *existing;
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;
struct server_lock_node *snode;
struct scoutfs_key key;
int ret = 0;
@@ -663,41 +652,41 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
}

for (i = 0; i < le16_to_cpu(nlr->nr); i++) {
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
if (!clent) {
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
if (!c_ent) {
ret = -ENOMEM;
goto out;
}

INIT_LIST_HEAD(&clent->head);
clent->rid = rid;
clent->net_id = 0;
clent->mode = nlr->locks[i].new_mode;
INIT_LIST_HEAD(&c_ent->head);
c_ent->rid = rid;
c_ent->net_id = 0;
c_ent->mode = nlr->locks[i].new_mode;

snode = alloc_server_lock(inf, &nlr->locks[i].key);
if (snode == NULL) {
kfree(clent);
kfree(c_ent);
ret = -ENOMEM;
goto out;
}

existing = find_entry(snode, &snode->granted, rid);
if (existing) {
kfree(clent);
kfree(c_ent);
put_server_lock(inf, snode);
ret = -EEXIST;
goto out;
}

clent->snode = snode;
add_client_entry(snode, &snode->granted, clent);
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
c_ent->snode = snode;
add_client_entry(snode, &snode->granted, c_ent);
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);

put_server_lock(inf, snode);

/* make sure next write lock is greater than all recovered */
set_max_write_version(inf,
le64_to_cpu(nlr->locks[i].write_version));
/* make sure next core seq is greater than all lock write seq */
scoutfs_server_set_seq_if_greater(sb,
le64_to_cpu(nlr->locks[i].write_seq));
}

/* send request for next batch of keys */
@@ -718,7 +707,7 @@ out:
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;
struct client_lock_entry *tmp;
struct server_lock_node *snode;
struct scoutfs_key key;
@@ -735,9 +724,9 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
(list == &snode->requested) ? &snode->invalidated :
NULL) {

list_for_each_entry_safe(clent, tmp, list, head) {
if (clent->rid == rid) {
free_client_entry(inf, snode, clent);
list_for_each_entry_safe(c_ent, tmp, list, head) {
if (c_ent->rid == rid) {
free_client_entry(inf, snode, c_ent);
freed = true;
}
}
@@ -798,15 +787,15 @@ static char *lock_on_list_string(u8 on_list)
static void lock_server_tseq_show(struct seq_file *m,
struct scoutfs_tseq_entry *ent)
{
struct client_lock_entry *clent = container_of(ent,
struct client_lock_entry *c_ent = container_of(ent,
struct client_lock_entry,
tseq_entry);
struct server_lock_node *snode = clent->snode;
struct server_lock_node *snode = c_ent->snode;

seq_printf(m, SK_FMT" %s %s rid %016llx net_id %llu\n",
SK_ARG(&snode->key), lock_mode_string(clent->mode),
lock_on_list_string(clent->on_list), clent->rid,
clent->net_id);
SK_ARG(&snode->key), lock_mode_string(c_ent->mode),
lock_on_list_string(c_ent->on_list), c_ent->rid,
c_ent->net_id);
}

static void stats_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
@@ -823,9 +812,7 @@ static void stats_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
* Setup the lock server.  This is called before networking can deliver
* requests.
*/
int scoutfs_lock_server_setup(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, u64 max_vers)
int scoutfs_lock_server_setup(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct lock_server_info *inf;
@@ -839,9 +826,6 @@ int scoutfs_lock_server_setup(struct super_block *sb,
inf->locks_root = RB_ROOT;
scoutfs_tseq_tree_init(&inf->tseq_tree, lock_server_tseq_show);
scoutfs_tseq_tree_init(&inf->stats_tseq_tree, stats_tseq_show);
inf->alloc = alloc;
inf->wri = wri;
atomic64_set(&inf->write_version, max_vers); /* inc_return gives +1 */

inf->tseq_dentry = scoutfs_tseq_create("server_locks", sbi->debug_root,
&inf->tseq_tree);
@@ -850,8 +834,8 @@ int scoutfs_lock_server_setup(struct super_block *sb,
return -ENOMEM;
}

inf->stats_tseq_dentry = scoutfs_tseq_create("tmp_lock_stats", sbi->debug_root,
&inf->stats_tseq_tree);
inf->stats_tseq_dentry = scoutfs_tseq_create("server_lock_stats", sbi->debug_root,
&inf->stats_tseq_tree);
if (!inf->stats_tseq_dentry) {
debugfs_remove(inf->tseq_dentry);
kfree(inf);
@@ -873,7 +857,7 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct server_lock_node *snode;
struct server_lock_node *stmp;
struct client_lock_entry *clent;
struct client_lock_entry *c_ent;
struct client_lock_entry *ctmp;
LIST_HEAD(list);

@@ -889,8 +873,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
list_splice_init(&snode->invalidated, &list);

mutex_lock(&snode->mutex);
list_for_each_entry_safe(clent, ctmp, &list, head) {
free_client_entry(inf, snode, clent);
list_for_each_entry_safe(c_ent, ctmp, &list, head) {
free_client_entry(inf, snode, c_ent);
}
mutex_unlock(&snode->mutex);

@@ -11,9 +11,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid);

int scoutfs_lock_server_setup(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, u64 max_vers);
int scoutfs_lock_server_setup(struct super_block *sb);
void scoutfs_lock_server_destroy(struct super_block *sb);

#endif

@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include "key.h"
#include "counters.h"
#include "super.h"

void __printf(4, 5) scoutfs_msg(struct super_block *sb, const char *prefix,
const char *str, const char *fmt, ...);
@@ -23,6 +24,9 @@ do { \
#define scoutfs_info(sb, fmt, args...) \
scoutfs_msg_check(sb, KERN_INFO, "", fmt, ##args)

#define scoutfs_tprintk(sb, fmt, args...) \
trace_printk(SCSBF " " fmt "\n", SCSB_ARGS(sb), ##args);
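A hypothetical call site for the new trace helper; the format string and arguments are illustrative, not taken from a caller in this diff:

    scoutfs_tprintk(sb, "lock mode %u users %u", lock->mode, lock->users);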

#define scoutfs_bug_on(sb, cond, fmt, args...) \
do { \
if (cond) { \

kmod/src/net.c (173 changed lines)
@@ -30,6 +30,7 @@
#include "net.h"
#include "endian_swap.h"
#include "tseq.h"
#include "fence.h"

/*
* scoutfs networking delivers requests and responses between nodes.
@@ -330,6 +331,9 @@ static int submit_send(struct super_block *sb,
WARN_ON_ONCE(id == 0 && (flags & SCOUTFS_NET_FLAG_RESPONSE)))
return -EINVAL;

if (scoutfs_forcing_unmount(sb))
return -EIO;

msend = kmalloc(offsetof(struct message_send,
nh.data[data_len]), GFP_NOFS);
if (!msend)
@@ -420,6 +424,16 @@ static int process_request(struct scoutfs_net_connection *conn,
mrecv->nh.data, le16_to_cpu(mrecv->nh.data_len));
}

static int call_resp_func(struct super_block *sb, struct scoutfs_net_connection *conn,
scoutfs_net_response_t resp_func, void *resp_data,
void *resp, unsigned int resp_len, int error)
{
if (resp_func)
return resp_func(sb, conn, resp, resp_len, error, resp_data);
else
return 0;
}
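Both the normal response path and the forced-unmount drain later in this diff funnel through this helper; the unmount path completes abandoned sends with a NULL response buffer and an error:

    call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);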

/*
* An incoming response finds the queued request and calls its response
* function.  The response function for a given request will only be
@@ -434,7 +448,6 @@ static int process_response(struct scoutfs_net_connection *conn,
struct message_send *msend;
scoutfs_net_response_t resp_func = NULL;
void *resp_data;
int ret = 0;

spin_lock(&conn->lock);

@@ -449,11 +462,8 @@ static int process_response(struct scoutfs_net_connection *conn,

spin_unlock(&conn->lock);

if (resp_func)
ret = resp_func(sb, conn, mrecv->nh.data,
le16_to_cpu(mrecv->nh.data_len),
net_err_to_host(mrecv->nh.error), resp_data);
return ret;
return call_resp_func(sb, conn, resp_func, resp_data, mrecv->nh.data,
le16_to_cpu(mrecv->nh.data_len), net_err_to_host(mrecv->nh.error));
}

/*
@@ -619,8 +629,6 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
break;
}

trace_scoutfs_recv_clock_sync(nh.clock_sync_id);

data_len = le16_to_cpu(nh.data_len);

scoutfs_inc_counter(sb, net_recv_messages);
@@ -667,8 +675,15 @@ static void scoutfs_net_recv_worker(struct work_struct *work)

scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);

/* synchronously process greeting before next recvmsg */
if (nh.cmd == SCOUTFS_NET_CMD_GREETING)
/*
* Initial received greetings are processed
* synchronously before any other incoming messages.
*
* Incoming requests or responses to the lock client are
* called synchronously to avoid reordering.
*/
if (nh.cmd == SCOUTFS_NET_CMD_GREETING ||
(nh.cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn))
scoutfs_net_proc_worker(&mrecv->proc_work);
else
queue_work(conn->workq, &mrecv->proc_work);
@@ -768,9 +783,6 @@ static void scoutfs_net_send_worker(struct work_struct *work)
trace_scoutfs_net_send_message(sb, &conn->sockname,
&conn->peername, &msend->nh);

msend->nh.clock_sync_id = scoutfs_clock_sync_id();
trace_scoutfs_send_clock_sync(msend->nh.clock_sync_id);

ret = sendmsg_full(conn->sock, &msend->nh, len);

spin_lock(&conn->lock);
@@ -823,11 +835,9 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
if (conn->listening_conn && conn->notify_down)
conn->notify_down(sb, conn, conn->info, conn->rid);

/* free all messages, refactor and complete for forced unmount? */
list_splice_init(&conn->resend_queue, &conn->send_queue);
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head) {
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head)
free_msend(ninf, msend);
}

/* accepted sockets are removed from their listener's list */
if (conn->listening_conn) {
@@ -857,13 +867,31 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
}

/*
* Have a pretty aggressive keepalive timeout of around 10 seconds.  The
* TCP keepalives are being processed out of task context so they should
* be responsive even when mounts are under load.
* By default, TCP would maintain a connection to an unresponsive peer
* for a very long time indeed.  We can't do that because quorum
* members will only participate in an election when they don't have a
* healthy connection to a server.  We use the KEEPALIVE* and
* TCP_USER_TIMEOUT options to ensure that we'll break an unresponsive
* connection and return to the quorum and client connection paths to
* try and establish a new connection to an active server.
*
* The TCP_KEEP* and TCP_USER_TIMEOUT option interaction is subtle.
* TCP_USER_TIMEOUT only applies if there is unacked written data in the
* send queue.  It doesn't work if the connection is idle.  Adding
* keepalive probes with user_timeout set changes how the keepalive
* timeout is calculated.  CNT no longer matters.  Each time
* additional probes (not the first) are sent the user timeout is
* checked against the last time data was received.  If none of the
* keepalives are responded to then eventually the user timeout applies.
*
* Given all this, we start with the overall unresponsive timeout.  Then
* we set the probes to start sending towards the end of the timeout.
* We give it a few tries for a successful response before the timeout
* elapses during the probe timer processing after the unsuccessful
* probes.
*/
#define KEEPCNT 3
#define KEEPIDLE 7
#define KEEPINTVL 1
#define UNRESPONSIVE_TIMEOUT_SECS 10
#define UNRESPONSIVE_PROBES 3
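With these values the options work out to: TCP_KEEPIDLE = 10 - 3 = 7 seconds before the first probe, TCP_KEEPINTVL = 1 second between probes, and TCP_USER_TIMEOUT = 10 * 1000 ms, so probes go out at roughly 7, 8, and 9 seconds of silence before the 10 second timeout breaks the connection. A sketch of the arithmetic applied in sock_opts_and_names() below (variable names are illustrative):

    keepidle_secs   = UNRESPONSIVE_TIMEOUT_SECS - UNRESPONSIVE_PROBES;  /* 7 */
    keepintvl_secs  = 1;                                                /* 1 */
    user_timeout_ms = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC;     /* 10000 */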
static int sock_opts_and_names(struct scoutfs_net_connection *conn,
struct socket *sock)
{
@@ -872,7 +900,7 @@ static int sock_opts_and_names(struct scoutfs_net_connection *conn,
int optval;
int ret;

/* but use a keepalive timeout instead of send timeout */
/* we use a keepalive timeout instead of send timeout */
tv.tv_sec = 0;
tv.tv_usec = 0;
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
@@ -880,24 +908,32 @@ static int sock_opts_and_names(struct scoutfs_net_connection *conn,
if (ret)
goto out;

optval = KEEPCNT;
/* not checked when user_timeout != 0, but for clarity */
optval = UNRESPONSIVE_PROBES;
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
(char *)&optval, sizeof(optval));
if (ret)
goto out;

optval = KEEPIDLE;
BUILD_BUG_ON(UNRESPONSIVE_PROBES >= UNRESPONSIVE_TIMEOUT_SECS);
optval = UNRESPONSIVE_TIMEOUT_SECS - (UNRESPONSIVE_PROBES);
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
(char *)&optval, sizeof(optval));
if (ret)
goto out;

optval = KEEPINTVL;
optval = 1;
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
(char *)&optval, sizeof(optval));
if (ret)
goto out;

optval = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC;
ret = kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
(char *)&optval, sizeof(optval));
if (ret)
goto out;

optval = 1;
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
(char *)&optval, sizeof(optval));
@@ -925,6 +961,8 @@ static int sock_opts_and_names(struct scoutfs_net_connection *conn,
ret = -EAFNOSUPPORT;
if (ret)
goto out;

conn->last_peername = conn->peername;
out:
return ret;
}
@@ -1088,9 +1126,11 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
struct scoutfs_net_connection *listener;
struct scoutfs_net_connection *acc_conn;
scoutfs_net_response_t resp_func;
struct message_send *msend;
struct message_send *tmp;
unsigned long delay;
void *resp_data;

trace_scoutfs_net_shutdown_work_enter(sb, 0, 0);
trace_scoutfs_conn_shutdown_start(conn);
@@ -1136,6 +1176,30 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
/* and wait for accepted conn shutdown work to finish */
wait_event(conn->waitq, empty_accepted_list(conn));

/*
* Forced unmount will cause net submit to fail once it's
* started and it calls shutdown to interrupt any previous
* senders waiting for a response.  The response callbacks can
* do quite a lot of work so we're careful to call them outside
* the lock.
*/
if (scoutfs_forcing_unmount(sb)) {
spin_lock(&conn->lock);
list_splice_tail_init(&conn->send_queue, &conn->resend_queue);
while ((msend = list_first_entry_or_null(&conn->resend_queue,
struct message_send, head))) {
resp_func = msend->resp_func;
resp_data = msend->resp_data;
free_msend(ninf, msend);
spin_unlock(&conn->lock);

call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);

spin_lock(&conn->lock);
}
spin_unlock(&conn->lock);
}

spin_lock(&conn->lock);

/* greetings aren't resent across sockets */
@@ -1205,6 +1269,7 @@ static void scoutfs_net_reconn_free_worker(struct work_struct *work)
unsigned long now = jiffies;
unsigned long deadline = 0;
bool requeue = false;
int ret;

trace_scoutfs_net_reconn_free_work_enter(sb, 0, 0);

@@ -1218,10 +1283,18 @@ restart:
time_after_eq(now, acc->reconn_deadline))) {
set_conn_fl(acc, reconn_freeing);
spin_unlock(&conn->lock);
if (!test_conn_fl(conn, shutting_down))
scoutfs_info(sb, "client timed out "SIN_FMT" -> "SIN_FMT", can not reconnect",
SIN_ARG(&acc->sockname),
SIN_ARG(&acc->peername));
if (!test_conn_fl(conn, shutting_down)) {
scoutfs_info(sb, "client "SIN_FMT" reconnect timed out, fencing",
SIN_ARG(&acc->last_peername));
ret = scoutfs_fence_start(sb, acc->rid,
acc->last_peername.sin_addr.s_addr,
SCOUTFS_FENCE_CLIENT_RECONNECT);
if (ret) {
scoutfs_err(sb, "client fence returned err %d, shutting down server",
ret);
scoutfs_server_abort(sb);
}
}
destroy_conn(acc);
goto restart;
}
@@ -1292,6 +1365,7 @@ scoutfs_net_alloc_conn(struct super_block *sb,
init_waitqueue_head(&conn->waitq);
conn->sockname.sin_family = AF_INET;
conn->peername.sin_family = AF_INET;
conn->last_peername.sin_family = AF_INET;
INIT_LIST_HEAD(&conn->accepted_head);
INIT_LIST_HEAD(&conn->accepted_list);
conn->next_send_seq = 1;
@@ -1458,8 +1532,7 @@ int scoutfs_net_connect(struct super_block *sb,
struct scoutfs_net_connection *conn,
struct sockaddr_in *sin, unsigned long timeout_ms)
{
int error = 0;
int ret;
int ret = 0;

spin_lock(&conn->lock);
conn->connect_sin = *sin;
@@ -1467,10 +1540,8 @@ int scoutfs_net_connect(struct super_block *sb,
spin_unlock(&conn->lock);

queue_work(conn->workq, &conn->connect_work);

ret = wait_event_interruptible(conn->waitq,
connect_result(conn, &error));
return ret ?: error;
wait_event(conn->waitq, connect_result(conn, &ret));
return ret;
}
static void set_valid_greeting(struct scoutfs_net_connection *conn)
@@ -1606,10 +1677,10 @@ restart:
conn->next_send_id = reconn->next_send_id;
atomic64_set(&conn->recv_seq, atomic64_read(&reconn->recv_seq));

/* greeting response/ack will be on conn send queue */
/* reconn should be idle while in reconn_wait */
BUG_ON(!list_empty(&reconn->send_queue));
BUG_ON(!list_empty(&conn->resend_queue));
list_splice_init(&reconn->resend_queue, &conn->resend_queue);
/* queued greeting response is racing, can be in send or resend queue */
list_splice_tail_init(&reconn->resend_queue, &conn->resend_queue);

/* new conn info is unused, swap, old won't call down */
swap(conn->info, reconn->info);
@@ -1701,23 +1772,6 @@ int scoutfs_net_response_node(struct super_block *sb,
NULL, NULL, NULL);
}

/*
* The response function that was submitted with the request is not
* called if the request is canceled here.
*/
void scoutfs_net_cancel_request(struct super_block *sb,
struct scoutfs_net_connection *conn,
u8 cmd, u64 id)
{
struct message_send *msend;

spin_lock(&conn->lock);
msend = find_request(conn, cmd, id);
if (msend)
complete_send(conn, msend);
spin_unlock(&conn->lock);
}

struct sync_request_completion {
struct completion comp;
void *resp;
@@ -1773,11 +1827,10 @@ int scoutfs_net_sync_request(struct super_block *sb,
ret = scoutfs_net_submit_request(sb, conn, cmd, arg, arg_len,
sync_response, &sreq, &id);

ret = wait_for_completion_interruptible(&sreq.comp);
if (ret == -ERESTARTSYS)
scoutfs_net_cancel_request(sb, conn, cmd, id);
else
if (ret == 0) {
wait_for_completion(&sreq.comp);
ret = sreq.error;
}

return ret;
}

@@ -49,6 +49,7 @@ struct scoutfs_net_connection {
u64 greeting_id;
struct sockaddr_in sockname;
struct sockaddr_in peername;
struct sockaddr_in last_peername;

struct list_head accepted_head;
struct scoutfs_net_connection *listening_conn;
@@ -99,6 +100,16 @@ static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
}

static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_in *sin)
{
BUG_ON(sin->sin_family != AF_INET);

memset(addr, 0, sizeof(union scoutfs_inet_addr));
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
addr->v4.addr = be32_to_le32(sin->sin_addr.s_addr);
addr->v4.port = be16_to_le16(sin->sin_port);
}
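A contrived round trip pairing the new helper with the existing scoutfs_addr_to_sin(); the address values are arbitrary:

    union scoutfs_inet_addr addr;
    struct sockaddr_in sin = {
            .sin_family = AF_INET,
            .sin_port = htons(12345),
            .sin_addr = { .s_addr = htonl(INADDR_LOOPBACK) },
    };

    scoutfs_sin_to_addr(&addr, &sin);  /* pack into the on-wire form */
    scoutfs_addr_to_sin(&sin, &addr);  /* and unpack it again */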
struct scoutfs_net_connection *
scoutfs_net_alloc_conn(struct super_block *sb,
scoutfs_net_notify_t notify_up,
@@ -123,9 +134,6 @@ int scoutfs_net_submit_request_node(struct super_block *sb,
u64 rid, u8 cmd, void *arg, u16 arg_len,
scoutfs_net_response_t resp_func,
void *resp_data, u64 *id_ret);
void scoutfs_net_cancel_request(struct super_block *sb,
struct scoutfs_net_connection *conn,
u8 cmd, u64 id);
int scoutfs_net_sync_request(struct super_block *sb,
struct scoutfs_net_connection *conn,
u8 cmd, void *arg, unsigned arg_len,

@@ -137,11 +137,10 @@ struct omap_request {

/*
* In each inode group cluster lock we store data to track the open ino
* map which tracks all the inodes that the cluster lock covers.  When
* the version shows that the map is stale we send a request to update
* it.
* the seq shows that the map is stale we send a request to update it.
*/
struct scoutfs_omap_lock_data {
u64 version;
u64 seq;
bool req_in_flight;
wait_queue_head_t waitq;
struct scoutfs_open_ino_map map;
@@ -485,6 +484,10 @@ static int remove_rid_from_reqs(struct omap_info *ominf, u64 rid, u64 *resp_rid,
* response if it was the last rid waiting for a response.
*
* If this returns an error then the server will shut down.
*
* This can be called multiple times by different servers if there are
* errors reclaiming an evicted mount, so we allow asking to remove a
* rid that hasn't been added.
*/
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid)
{
@@ -495,21 +498,20 @@ int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid)
u64 resp_id = 0;
int ret;

map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
if (!map) {
ret = -ENOMEM;
goto out;
}

spin_lock(&ominf->lock);
entry = find_rid(&ominf->rids, rid);
if (entry)
free_rid(&ominf->rids, entry);
spin_unlock(&ominf->lock);

/* the server really shouldn't be removing a rid it never added */
if (WARN_ON_ONCE(!entry)) {
ret = -ENOENT;
if (!entry) {
ret = 0;
goto out;
}

map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
if (!map) {
ret = -ENOMEM;
goto out;
}

@@ -593,10 +595,6 @@ out:
free_req(req);
}

/* it's fine if we couldn't send to a client that left */
if (ret == -ENOTCONN)
ret = 0;

return ret;
}

@@ -616,7 +614,7 @@ static int handle_requests(struct super_block *sb)
int ret;
int err;

if (scoutfs_recov_next_pending(sb, SCOUTFS_RECOV_GREETING))
if (scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_GREETING))
return 0;

ret = 0;
@@ -830,8 +828,7 @@ static bool omap_req_in_flight(struct scoutfs_lock *lock, struct scoutfs_omap_lo
/*
* Make sure the map covered by the cluster lock is current.  The caller
* holds the cluster lock so once we store lock_data on the cluster lock
* it won't be freed and the write_version in the cluster lock won't
* change.
* it won't be freed and the write_seq in the cluster lock won't change.
*
* The omap_spinlock protects the omap_data in the cluster lock.  We
* have to drop it if we have to block to allocate lock_data, send a
@@ -858,7 +855,7 @@ static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lo
}

if (lock->omap_data == NULL) {
ldata->version = lock->write_version - 1; /* ensure refresh */
ldata->seq = lock->write_seq - 1; /* ensure refresh */
init_waitqueue_head(&ldata->waitq);

lock->omap_data = ldata;
@@ -868,7 +865,7 @@ static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lo
}
}

while (ldata->version != lock->write_version) {
while (ldata->seq != lock->write_seq) {
/* only one waiter sends a request at a time */
if (!ldata->req_in_flight) {
ldata->req_in_flight = true;
@@ -888,7 +885,7 @@ static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lo
if (send_req) {
ldata->req_in_flight = false;
if (ret == 0)
ldata->version = lock->write_version;
ldata->seq = lock->write_seq;
wake_up(&ldata->waitq);
if (ret < 0)
goto out;
@@ -907,9 +904,9 @@ out:
}

/*
* Return 1 and give the caller a write inode lock if it is safe to be
* deleted.  It's safe to be deleted when it is no longer reachable and
* nothing is referencing it.
* Return 1 and give the caller their locks when they should delete the
* inode items.  It's safe to delete the inode items when it is no
* longer reachable and nothing is referencing it.
*
* The inode is unreachable when nlink hits zero.  Cluster locks protect
* modification and testing of nlink.  We use the ino_lock_cov coverage
@@ -924,15 +921,17 @@ out:
* increase nlink from zero and let people get a reference to the inode.
*/
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
struct scoutfs_lock **lock_ret)
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
struct scoutfs_lock *orph_lock = NULL;
struct scoutfs_lock *lock = NULL;
const u64 ino = scoutfs_ino(inode);
struct scoutfs_omap_lock_data *ldata;
u64 group_nr;
int bit_nr;
int ret;
int err;

/* lock group and omap constants are defined independently */
BUILD_BUG_ON(SCOUTFS_OPEN_INO_MAP_BITS != SCOUTFS_LOCK_INODE_GROUP_NR);
@@ -963,12 +962,19 @@ int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
out:
trace_scoutfs_omap_should_delete(sb, ino, inode->i_nlink, ret);

if (ret > 0) {
err = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, &orph_lock);
if (err < 0)
ret = err;
}

if (ret <= 0) {
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
lock = NULL;
}

*lock_ret = lock;
*orph_lock_ret = orph_lock;
return ret;
}
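A sketch of the calling convention implied by the new signature; the surrounding eviction-path details are omitted:

    struct scoutfs_lock *lock = NULL;
    struct scoutfs_lock *orph_lock = NULL;
    int ret;

    ret = scoutfs_omap_should_delete(sb, inode, &lock, &orph_lock);
    if (ret > 0) {
            /* delete the inode items, then drop both locks */
            scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);
            scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
    }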
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
int scoutfs_omap_inc(struct super_block *sb, u64 ino);
|
||||
void scoutfs_omap_dec(struct super_block *sb, u64 ino);
|
||||
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
|
||||
struct scoutfs_lock **lock_ret);
|
||||
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret);
|
||||
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata);
|
||||
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args);
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "block.h"
|
||||
#include "net.h"
|
||||
#include "sysfs.h"
|
||||
#include "fence.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
/*
|
||||
@@ -60,10 +61,9 @@
|
||||
* running (maybe they've deadlocked, or lost network communications).
|
||||
* In addition to a configuration slot in the super block, each quorum
|
||||
* member also has a known block location that represents their slot.
|
||||
* They set a flag in their block indicating that they've been elected
|
||||
* leader, then read slots for all the other blocks looking for
|
||||
* previously active leaders to fence. After that it can start the
|
||||
* server.
|
||||
* The block contains an array of events which are updated during the life
|
||||
* time of the quorum agent. The elected leader set its elected event
 * and can then start the server.
 *
 * It's critical to raft elections that a participant's term not go
 * backwards in time so each mount also uses its quorum block to store
@@ -97,7 +97,7 @@ struct quorum_host_msg {

struct last_msg {
        struct quorum_host_msg msg;
        struct timespec64 ts;
        ktime_t ts;
};

enum quorum_role { FOLLOWER, CANDIDATE, LEADER };
@@ -209,7 +209,7 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,
        DECLARE_QUORUM_INFO(sb, qinf);
        struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
        struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
        struct timespec64 ts;
        ktime_t now;
        int i;

        struct scoutfs_quorum_message qmes = {
@@ -235,7 +235,6 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,

        qmes.crc = quorum_message_crc(&qmes);

        ts = ktime_to_timespec64(ktime_get());

        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                if (!quorum_slot_present(super, i) ||
@@ -243,12 +242,13 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,
                        continue;

                scoutfs_quorum_slot_sin(super, i, &sin);
                now = ktime_get();
                kernel_sendmsg(qinf->sock, &mh, &kv, 1, kv.iov_len);

                spin_lock(&qinf->show_lock);
                qinf->last_send[i].msg.term = term;
                qinf->last_send[i].msg.type = type;
                qinf->last_send[i].ts = ts;
                qinf->last_send[i].ts = now;
                spin_unlock(&qinf->show_lock);

                if (i == only)
@@ -308,6 +308,8 @@ static int recv_msg(struct super_block *sb, struct quorum_host_msg *msg,
        if (ret < 0)
                return ret;

        now = ktime_get();

        if (ret != sizeof(qmes) ||
            qmes.crc != quorum_message_crc(&qmes) ||
            qmes.fsid != super->hdr.fsid ||
@@ -327,24 +329,25 @@ static int recv_msg(struct super_block *sb, struct quorum_host_msg *msg,

        spin_lock(&qinf->show_lock);
        qinf->last_recv[msg->from].msg = *msg;
        qinf->last_recv[msg->from].ts = ktime_to_timespec64(ktime_get());
        qinf->last_recv[msg->from].ts = now;
        spin_unlock(&qinf->show_lock);

        return 0;
}

/*
 * The caller can provide a mark that they're using to track their
 * written blocks.  It's updated as they write the block and we can
 * compare it with what we read to see if there have been unexpected
 * intervening writes to the block -- the caller is supposed to have
 * exclusive access to the block (or was fenced).
 * Read and verify block fields before giving it to the caller.  We
 * should have exclusive write access to the block.  We know that
 * something has gone horribly wrong if we don't see our rid in the
 * begin event after we've written it as we started up.
 */
static int read_quorum_block(struct super_block *sb, u64 blkno,
                             struct scoutfs_quorum_block *blk, __le64 *mark)
static int read_quorum_block(struct super_block *sb, u64 blkno, struct scoutfs_quorum_block *blk,
                             bool check_rid)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;
        const u64 rid = sbi->rid;
        char msg[150];
        __le32 crc;
        int ret;

@@ -355,165 +358,256 @@ static int read_quorum_block(struct super_block *sb, u64 blkno,

        ret = scoutfs_block_read_sm(sb, sbi->meta_bdev, blkno,
                                    &blk->hdr, sizeof(*blk), &crc);
        if (ret < 0) {
                scoutfs_err(sb, "quorum block read error %d", ret);
                goto out;
        }

        /* detect invalid blocks */
        if (ret == 0 &&
            ((blk->hdr.crc != crc) ||
             (le32_to_cpu(blk->hdr.magic) != SCOUTFS_BLOCK_MAGIC_QUORUM) ||
             (blk->hdr.fsid != super->hdr.fsid) ||
             (le64_to_cpu(blk->hdr.blkno) != blkno))) {
                scoutfs_inc_counter(sb, quorum_read_invalid_block);
        if (blk->hdr.crc != crc)
                snprintf(msg, sizeof(msg), "blk crc %08x != %08x",
                         le32_to_cpu(blk->hdr.crc), le32_to_cpu(crc));
        else if (le32_to_cpu(blk->hdr.magic) != SCOUTFS_BLOCK_MAGIC_QUORUM)
                snprintf(msg, sizeof(msg), "blk magic %08x != %08x",
                         le32_to_cpu(blk->hdr.magic), SCOUTFS_BLOCK_MAGIC_QUORUM);
        else if (blk->hdr.fsid != super->hdr.fsid)
                snprintf(msg, sizeof(msg), "blk fsid %016llx != %016llx",
                         le64_to_cpu(blk->hdr.fsid), le64_to_cpu(super->hdr.fsid));
        else if (le64_to_cpu(blk->hdr.blkno) != blkno)
                snprintf(msg, sizeof(msg), "blk blkno %llu != %llu",
                         le64_to_cpu(blk->hdr.blkno), blkno);
        else if (check_rid && le64_to_cpu(blk->events[SCOUTFS_QUORUM_EVENT_BEGIN].rid) != rid)
                snprintf(msg, sizeof(msg), "quorum block begin rid %016llx != our rid %016llx, are multiple mounts configured with this slot?",
                         le64_to_cpu(blk->events[SCOUTFS_QUORUM_EVENT_BEGIN].rid), rid);
        else
                msg[0] = '\0';

        if (msg[0] != '\0') {
                scoutfs_err(sb, "read invalid quorum block, %s", msg);
                ret = -EIO;
                goto out;
        }

        if (mark && *mark != 0 && blk->random_write_mark != *mark) {
                scoutfs_err(sb, "read unexpected quorum block write mark, are multiple mounts configured with the same slot?");
                ret = -EIO;
        }

        if (ret < 0)
                scoutfs_err(sb, "quorum block read error %d", ret);

out:
        return ret;
}

static void set_quorum_block_event(struct super_block *sb,
                                   struct scoutfs_quorum_block *blk,
                                   struct scoutfs_quorum_block_event *ev)
/*
 * It's really important in raft elections that the term not go
 * backwards in time.  We achieve this by having each participant record
 * the greatest term they've seen in their quorum block.  It's also
 * important that participants agree on the greatest term.  It can
 * happen that one gets ahead of the rest, perhaps by being forcefully
 * shutdown after having just been elected.  As everyone starts up it's
 * possible to have N-1 have term T-1 while just one participant thinks
 * the term is T.  That single participant will ignore all messages
 * from older terms.  If its timeout is greater than the others it can
 * immediately override the election of the majority and request votes
 * and become elected.
 *
 * A best-effort workaround is to have everyone try and start from the
 * greatest term that they can find in everyone's blocks.  If it works
 * then you avoid having those with greater terms ignore others.  If it
 * doesn't work the elections will eventually stabilize after rocky
 * periods of fencing from what looks like concurrent elections.
 */
static void read_greatest_term(struct super_block *sb, u64 *term)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;
        struct scoutfs_quorum_block blk;
        int ret;
        int e;
        int s;

        *term = 0;

        for (s = 0; s < SCOUTFS_QUORUM_MAX_SLOTS; s++) {
                if (!quorum_slot_present(super, s))
                        continue;

                ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + s, &blk, false);
                if (ret < 0)
                        continue;

                for (e = 0; e < ARRAY_SIZE(blk.events); e++) {
                        if (blk.events[e].rid)
                                *term = max(*term, le64_to_cpu(blk.events[e].term));
                }
        }
}

static void set_quorum_block_event(struct super_block *sb, struct scoutfs_quorum_block *blk,
                                   int event, u64 term)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_quorum_block_event *ev;
        struct timespec64 ts;

        getnstimeofday64(&ts);
        if (WARN_ON_ONCE(event < 0 || event >= SCOUTFS_QUORUM_EVENT_NR))
                return;

        getnstimeofday64(&ts);
        le64_add_cpu(&blk->write_nr, 1);

        ev = &blk->events[event];
        ev->write_nr = blk->write_nr;
        ev->rid = cpu_to_le64(sbi->rid);
        ev->term = cpu_to_le64(term);
        ev->ts.sec = cpu_to_le64(ts.tv_sec);
        ev->ts.nsec = cpu_to_le32(ts.tv_nsec);
}

/*
 * Every time we write a block we update the write stamp and random
 * write mark so readers can see our write.
 */
static int write_quorum_block(struct super_block *sb, u64 blkno,
                              struct scoutfs_quorum_block *blk, __le64 *mark)
static int write_quorum_block(struct super_block *sb, u64 blkno, struct scoutfs_quorum_block *blk)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        int ret;

        if (WARN_ON_ONCE(blkno < SCOUTFS_QUORUM_BLKNO) ||
            WARN_ON_ONCE(blkno >= (SCOUTFS_QUORUM_BLKNO +
                                   SCOUTFS_QUORUM_BLOCKS)))
                return -EINVAL;

        do {
                get_random_bytes(&blk->random_write_mark,
                                 sizeof(blk->random_write_mark));
        } while (blk->random_write_mark == 0);

        if (mark)
                *mark = blk->random_write_mark;

        set_quorum_block_event(sb, blk, &blk->write);

        ret = scoutfs_block_write_sm(sb, sbi->meta_bdev, blkno,
                                     &blk->hdr, sizeof(*blk));
        if (ret < 0)
                scoutfs_err(sb, "quorum block write error %d", ret);

        return ret;
        return scoutfs_block_write_sm(sb, sbi->meta_bdev, blkno, &blk->hdr, sizeof(*blk));
}

/*
 * Read the caller's slot's current quorum block, make a change, and
 * write it back out.  If the caller provides a mark it can cause read
 * errors if we read a mark that doesn't match the last mark that the
 * caller wrote.
 * Read the caller's slot's quorum block, make a change, and write it
 * back out.
 */
static int update_quorum_block(struct super_block *sb, u64 blkno,
                               __le64 *mark, int role, u64 term)
static int update_quorum_block(struct super_block *sb, int event, u64 term, bool check_rid)
{
        struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
        u64 blkno = SCOUTFS_QUORUM_BLKNO + opts->quorum_slot_nr;
        struct scoutfs_quorum_block blk;
        u64 flags;
        u64 bits;
        u64 set;
        int ret;

        ret = read_quorum_block(sb, blkno, &blk, mark);
        ret = read_quorum_block(sb, blkno, &blk, check_rid);
        if (ret == 0) {
                if (blk.term != cpu_to_le64(term)) {
                        blk.term = cpu_to_le64(term);
                        set_quorum_block_event(sb, &blk, &blk.update_term);
                }

                flags = le64_to_cpu(blk.flags);
                bits = SCOUTFS_QUORUM_BLOCK_LEADER;
                set = role == LEADER ? SCOUTFS_QUORUM_BLOCK_LEADER : 0;
                if ((flags & bits) != set)
                        set_quorum_block_event(sb, &blk,
                                               set ? &blk.set_leader :
                                                     &blk.clear_leader);
                blk.flags = cpu_to_le64((flags & ~bits) | set);

                ret = write_quorum_block(sb, blkno, &blk, mark);
                set_quorum_block_event(sb, &blk, event, term);
                ret = write_quorum_block(sb, blkno, &blk);
                if (ret < 0)
                        scoutfs_err(sb, "error %d writing quorum block %llu after updating event %d term %llu",
                                    ret, blkno, event, term);
        } else {
                scoutfs_err(sb, "error %d reading quorum block %llu to update event %d term %llu",
                            ret, blkno, event, term);
        }

        return ret;
}
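
Taken together, the new update_quorum_block() is a read-modify-write of the caller's own slot block: read and verify, stamp the named event with the term and a bumped write_nr, and write the block back. A standalone userspace model of that cycle, with the block and event structs simplified (no header, CRC, or timestamp fields):

/* Editor's sketch: simplified userspace model of the event-update cycle. */
#include <stdint.h>
#include <stdio.h>

enum { EV_BEGIN, EV_TERM, EV_ELECT, EV_FENCE, EV_STOP, EV_END, EV_NR };

struct event { uint64_t rid, term, write_nr; };
struct qblock { uint64_t write_nr; struct event events[EV_NR]; };

static struct qblock disk[8];            /* one block per quorum slot */

static void update_block(int slot, uint64_t rid, int event, uint64_t term)
{
        struct qblock blk = disk[slot];  /* "read" the slot's block */

        blk.write_nr++;                  /* every write is visible to readers */
        blk.events[event].rid = rid;
        blk.events[event].term = term;
        blk.events[event].write_nr = blk.write_nr;

        disk[slot] = blk;                /* "write" it back */
}

int main(void)
{
        update_block(0, 0xabc, EV_BEGIN, 5);   /* agent starts at term 5 */
        update_block(0, 0xabc, EV_ELECT, 6);   /* later elected at term 6 */
        printf("elect term %llu write_nr %llu\n",
               (unsigned long long)disk[0].events[EV_ELECT].term,
               (unsigned long long)disk[0].write_nr);
        return 0;
}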

/*
 * The calling server has fenced previous leaders and reclaimed their
 * resources.  We can now update our fence event with a greater term to
 * stop future leaders from doing the same.
 */
int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term)
{
        return update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_FENCE, term, true);
}

/*
 * The calling server has been elected and updated their block, but
 * can't yet assume that it has exclusive access to the metadata device.
 * We read all the quorum blocks looking for previously elected leaders
 * to fence so that we're the only leader running.
 * The calling server has been elected and has started running but can't
 * yet assume that it has exclusive access to the metadata device.  We
 * read all the quorum blocks looking for previously elected leaders to
 * fence so that we're the only leader running.
 *
 * We're relying on the invariant that there can't be two mounts running
 * with the same slot nr at the same time.  With this constraint there
 * can be at most two previous leaders per slot that need to be fenced:
 * a persistent record of an old mount on the slot, and an active mount.
 *
 * If we start fence requests then we only wait for them to complete
 * before returning.  The server will reclaim their resources once it is
 * up and running and will call us to update the fence event.  If we
 * don't start fence requests then we update the fence event
 * immediately, the server has nothing more to do.
 *
 * Quorum will be sending heartbeats while we wait for fencing.  That
 * keeps us from being fenced while we allow userspace fencing to take a
 * reasonably long time.  We still want to timeout eventually.
 */
static int fence_leader_blocks(struct super_block *sb)
int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term)
{
#define NR_OLD 2
        struct scoutfs_quorum_block_event old[SCOUTFS_QUORUM_MAX_SLOTS][NR_OLD] = {{{0,}}};
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;
        struct mount_options *opts = &sbi->opts;
        struct scoutfs_quorum_block blk;
        struct sockaddr_in sin;
        u64 blkno;
        const u64 rid = sbi->rid;
        bool fence_started = false;
        u64 fenced = 0;
        __le64 fence_rid;
        int ret = 0;
        int err;
        int i;
        int j;

        BUILD_BUG_ON(SCOUTFS_QUORUM_BLOCKS < SCOUTFS_QUORUM_MAX_SLOTS);

        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                if (i == opts->quorum_slot_nr)
                if (!quorum_slot_present(super, i))
                        continue;

                blkno = SCOUTFS_QUORUM_BLKNO + i;
                ret = read_quorum_block(sb, blkno, &blk, NULL);
                ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + i, &blk, false);
                if (ret < 0)
                        goto out;

                if (!(le64_to_cpu(blk.flags) & SCOUTFS_QUORUM_BLOCK_LEADER))
                        continue;
                /* elected leader still running */
                if (le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_ELECT].term) >
                    le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_STOP].term))
                        old[i][0] = blk.events[SCOUTFS_QUORUM_EVENT_ELECT];

                scoutfs_inc_counter(sb, quorum_fence_leader);
                scoutfs_quorum_slot_sin(super, i, &sin);
                /* persistent record of previous server before elected */
                if ((le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_FENCE].term) >
                     le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_STOP].term)) &&
                    (le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_FENCE].term) <
                     le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_ELECT].term)))
                        old[i][1] = blk.events[SCOUTFS_QUORUM_EVENT_FENCE];

                scoutfs_err(sb, "fencing "SCSBF" at "SIN_FMT,
                            SCSB_LEFR_ARGS(super->hdr.fsid, blk.set_leader.rid),
                            SIN_ARG(&sin));
                /* find greatest term that has fenced everything before it */
                fenced = max(fenced, le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_FENCE].term));
        }

        blk.flags &= ~cpu_to_le64(SCOUTFS_QUORUM_BLOCK_LEADER);
        set_quorum_block_event(sb, &blk, &blk.fenced);
        /* now actually fence any old leaders which haven't been fenced yet */
        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                for (j = 0; j < NR_OLD; j++) {
                        if (le64_to_cpu(old[i][j].term) == 0 ||     /* uninitialized */
                            le64_to_cpu(old[i][j].term) < fenced || /* already fenced */
                            le64_to_cpu(old[i][j].term) > term ||   /* newer than us */
                            le64_to_cpu(old[i][j].rid) == rid)      /* us */
                                continue;

                        ret = write_quorum_block(sb, blkno, &blk, NULL);
                        if (ret < 0)
                                goto out;
                        scoutfs_inc_counter(sb, quorum_fence_leader);
                        scoutfs_quorum_slot_sin(super, i, &sin);
                        fence_rid = old[i][j].rid;

                        scoutfs_info(sb, "fencing previous leader "SCSBF" at term %llu in slot %u with address "SIN_FMT,
                                     SCSB_LEFR_ARGS(super->hdr.fsid, fence_rid),
                                     le64_to_cpu(old[i][j].term), i, SIN_ARG(&sin));
                        ret = scoutfs_fence_start(sb, le64_to_cpu(fence_rid), sin.sin_addr.s_addr,
                                                  SCOUTFS_FENCE_QUORUM_BLOCK_LEADER);
                        if (ret < 0)
                                goto out;
                        fence_started = true;
                }
        }

out:
        if (ret < 0) {
                scoutfs_err(sb, "error %d fencing active", ret);
                scoutfs_inc_counter(sb, quorum_fence_error);
        if (fence_started) {
                err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
                if (ret == 0)
                        ret = err;
        } else {
                err = scoutfs_quorum_fence_complete(sb, term);
                if (ret == 0)
                        ret = err;
        }

        if (ret < 0)
                scoutfs_inc_counter(sb, quorum_fence_error);

        return ret;
}
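
The four skip tests in the fencing loop above amount to a small predicate over a recorded leader event. A standalone model in plain C, operating on already-converted (cpu-endian) fields:

/* Editor's sketch: mirror of the filter in scoutfs_quorum_fence_leaders(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_fence(uint64_t ev_term, uint64_t ev_rid,
                        uint64_t fenced, uint64_t our_term, uint64_t our_rid)
{
        if (ev_term == 0)        /* uninitialized slot event */
                return false;
        if (ev_term < fenced)    /* already covered by a completed fence */
                return false;
        if (ev_term > our_term)  /* newer than us, not ours to fence */
                return false;
        if (ev_rid == our_rid)   /* us */
                return false;
        return true;
}

int main(void)
{
        /* old leader at term 3, fencing complete through term 2, we're term 4 */
        printf("%d\n", needs_fence(3, 0x111, 2, 4, 0x999));  /* 1: fence it */
        printf("%d\n", needs_fence(1, 0x111, 2, 4, 0x999));  /* 0: already fenced */
        return 0;
}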

@@ -529,37 +623,36 @@ static void scoutfs_quorum_worker(struct work_struct *work)
        struct quorum_info *qinf = container_of(work, struct quorum_info, work);
        struct super_block *sb = qinf->sb;
        struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
        struct scoutfs_quorum_block blk;
        struct sockaddr_in unused;
        struct quorum_host_msg msg;
        struct quorum_status qst;
        __le64 mark;
        u64 blkno;
        int ret;
        int err;

        /* recording votes from slots as native single word bitmap */
        BUILD_BUG_ON(SCOUTFS_QUORUM_MAX_SLOTS > BITS_PER_LONG);

        /* get our starting term from our persistent block */
        mark = 0;
        blkno = SCOUTFS_QUORUM_BLKNO + opts->quorum_slot_nr;
        ret = read_quorum_block(sb, blkno, &blk, &mark);
        if (ret < 0)
                goto out;

        /* start out as a follower */
        qst.role = FOLLOWER;
        qst.term = le64_to_cpu(blk.term);
        qst.term = 0;
        qst.vote_for = -1;
        qst.vote_bits = 0;

        /* read our starting term from greatest in all events in all slots */
        read_greatest_term(sb, &qst.term);

        /* see if there's a server to choose heartbeat or election timeout */
        if (scoutfs_quorum_server_sin(sb, &unused) == 0)
                qst.timeout = heartbeat_timeout();
        else
                qst.timeout = election_timeout();

        while (!qinf->shutdown) {
        /* record that we're up and running, readers check that it isn't updated */
        ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_BEGIN, qst.term, false);
        if (ret < 0)
                goto out;

        while (!(qinf->shutdown || scoutfs_forcing_unmount(sb))) {

                ret = recv_msg(sb, &msg, qst.timeout);
                if (ret < 0) {
@@ -589,11 +682,6 @@ static void scoutfs_quorum_worker(struct work_struct *work)
                        send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
                                        qst.term);
                        scoutfs_inc_counter(sb, quorum_send_resignation);

                        ret = update_quorum_block(sb, blkno, &mark,
                                                  qst.role, qst.term);
                        if (ret < 0)
                                goto out;
                }

                spin_lock(&qinf->show_lock);
@@ -624,8 +712,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)
                        qst.timeout = election_timeout();

                        /* store our increased term */
                        ret = update_quorum_block(sb, blkno, &mark,
                                                  qst.role, qst.term);
                        ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_TERM, qst.term, true);
                        if (ret < 0)
                                goto out;
                }
@@ -642,16 +729,20 @@ static void scoutfs_quorum_worker(struct work_struct *work)
                                        qst.term);
                        qst.timeout = election_timeout();
                        scoutfs_inc_counter(sb, quorum_send_request);

                        /* store our increased term */
                        ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_TERM, qst.term, true);
                        if (ret < 0)
                                goto out;
                }

                /* candidates count votes in their term */
                if (qst.role == CANDIDATE &&
                    msg.type == SCOUTFS_QUORUM_MSG_VOTE) {
                        if (test_bit(msg.from, &qst.vote_bits)) {
                        if (test_and_set_bit(msg.from, &qst.vote_bits)) {
                                scoutfs_warn(sb, "already received vote from %u in term %llu, are there multiple mounts with quorum_slot_nr=%u?",
                                             msg.from, qst.term, msg.from);
                        }
                        set_bit(msg.from, &qst.vote_bits);
                        scoutfs_inc_counter(sb, quorum_recv_vote);
                }

@@ -670,10 +761,8 @@ static void scoutfs_quorum_worker(struct work_struct *work)
                                        qst.term);
                        qst.timeout = heartbeat_interval();

                        /* set our leader flag and fence */
                        ret = update_quorum_block(sb, blkno, &mark,
                                                  qst.role, qst.term) ?:
                              fence_leader_blocks(sb);
                        /* record that we've been elected before starting up server */
                        ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_ELECT, qst.term, true);
                        if (ret < 0)
                                goto out;

@@ -684,9 +773,16 @@ static void scoutfs_quorum_worker(struct work_struct *work)

                        ret = scoutfs_server_start(sb, qst.term);
                        if (ret < 0) {
                                scoutfs_err(sb, "server startup failed with %d",
                                            ret);
                                goto out;
                                clear_bit(QINF_FLAG_SERVER, &qinf->flags);
                                /* store our increased term */
                                err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, qst.term,
                                                          true);
                                if (err < 0) {
                                        ret = err;
                                        goto out;
                                }
                                ret = 0;
                                continue;
                        }
                }

@@ -727,77 +823,75 @@ static void scoutfs_quorum_worker(struct work_struct *work)
        /* always try to stop a running server as we stop */
        if (test_bit(QINF_FLAG_SERVER, &qinf->flags)) {
                scoutfs_server_stop(sb);
                scoutfs_fence_stop(sb);
                send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
                                qst.term);
        }

        /* always try to clear leader block as we stop to avoid fencing */
        if (qst.role == LEADER) {
                ret = update_quorum_block(sb, blkno, &mark,
                                          FOLLOWER, qst.term);
                if (ret < 0)
                        goto out;
        }
        /* record that this slot no longer has an active quorum */
        update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_END, qst.term, true);
out:
        if (ret < 0) {
                scoutfs_err(sb, "quorum service saw error %d, shutting down.  Cluster will be degraded until this slot is remounted to restart the quorum service",
                scoutfs_err(sb, "quorum service saw error %d, shutting down.  This mount is no longer participating in quorum.  It should be remounted to restore service.",
                            ret);
        }
}

/*
 * Set a flag for the quorum work's next iteration to indicate that the
 * server has shutdown and that it should step down as leader, update
 * quorum blocks, and stop sending heartbeats.
 * The calling server has shutdown and is no longer using shared
 * resources.  Clear the bit so that we stop sending heartbeats and
 * allow the next server to be elected.  Update the stop event so that
 * it won't be considered available by clients or fenced by the next
 * leader.
 */
void scoutfs_quorum_server_shutdown(struct super_block *sb)
void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term)
{
        DECLARE_QUORUM_INFO(sb, qinf);

        set_bit(QINF_FLAG_SERVER, &qinf->flags);
        clear_bit(QINF_FLAG_SERVER, &qinf->flags);
        update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, term, true);
}

/*
 * Clients read quorum blocks looking for the leader with a server whose
 * address it can try and connect to.
 *
 * There can be multiple running servers if a client checks before a
 * server has had a chance to fence any old servers.  We try to use the
 * block with the most recent timestamp.  If we get it wrong the
 * connection will timeout and the client will try again, presumably
 * finding a single server block.
 * There can be records of multiple previous elected leaders if the
 * current server hasn't yet fenced any old servers.  We use the elected
 * leader with the greatest elected term.  If we get it wrong the
 * connection will timeout and the client will try again.
 */
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &sbi->super;
        struct scoutfs_quorum_block blk;
        struct timespec64 recent = {0,};
        struct timespec64 ts;
        int ret;
        u64 elect_term;
        u64 term = 0;
        int ret = 0;
        int i;

        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + i, &blk,
                                        NULL);
                if (!quorum_slot_present(super, i))
                        continue;

                ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + i, &blk, false);
                if (ret < 0) {
                        scoutfs_err(sb, "error reading quorum block nr %u: %d",
                                    i, ret);
                        goto out;
                }

                ts.tv_sec = le64_to_cpu(blk.set_leader.ts.sec);
                ts.tv_nsec = le32_to_cpu(blk.set_leader.ts.nsec);

                if ((le64_to_cpu(blk.flags) & SCOUTFS_QUORUM_BLOCK_LEADER) &&
                    (timespec64_to_ns(&ts) > timespec64_to_ns(&recent))) {
                        recent = ts;
                elect_term = le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_ELECT].term);
                if (elect_term > term &&
                    elect_term > le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_STOP].term)) {
                        term = elect_term;
                        scoutfs_quorum_slot_sin(super, i, sin);
                        continue;
                }
        }

        if (timespec64_to_ns(&recent) == 0)
        if (term == 0)
                ret = -ENOENT;

out:
@@ -864,6 +958,7 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
        struct quorum_status qst;
        struct last_msg last;
        struct timespec64 ts;
        const ktime_t now = ktime_get();
        size_t size;
        int ret;
        int i;
@@ -885,9 +980,9 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
                     qst.vote_for);
        snprintf_ret(buf, size, &ret, "vote_bits 0x%lx (count %lu)\n",
                     qst.vote_bits, hweight_long(qst.vote_bits));
        ts = ktime_to_timespec64(qst.timeout);
        snprintf_ret(buf, size, &ret, "timeout %llu.%u\n",
                     (u64)ts.tv_sec, (int)ts.tv_nsec);
        ts = ktime_to_timespec64(ktime_sub(qst.timeout, now));
        snprintf_ret(buf, size, &ret, "timeout_in_secs %lld.%09u\n",
                     (s64)ts.tv_sec, (int)ts.tv_nsec);

        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                spin_lock(&qinf->show_lock);
@@ -897,10 +992,11 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
                if (last.msg.term == 0)
                        continue;

                ts = ktime_to_timespec64(ktime_sub(now, last.ts));
                snprintf_ret(buf, size, &ret,
                             "last_send to %u term %llu type %u ts %llu.%u\n",
                             "last_send to %u term %llu type %u secs_since %lld.%09u\n",
                             i, last.msg.term, last.msg.type,
                             (u64)last.ts.tv_sec, (int)last.ts.tv_nsec);
                             (s64)ts.tv_sec, (int)ts.tv_nsec);
        }

        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
@@ -910,10 +1006,12 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,

                if (last.msg.term == 0)
                        continue;

                ts = ktime_to_timespec64(ktime_sub(now, last.ts));
                snprintf_ret(buf, size, &ret,
                             "last_recv from %u term %llu type %u ts %llu.%u\n",
                             "last_recv from %u term %llu type %u secs_since %lld.%09u\n",
                             i, last.msg.term, last.msg.type,
                             (u64)last.ts.tv_sec, (int)last.ts.tv_nsec);
                             (s64)ts.tv_sec, (int)ts.tv_nsec);
        }

        return ret;
@@ -950,13 +1048,17 @@ static inline bool valid_ipv4_port(__be16 port)
static int verify_quorum_slots(struct super_block *sb)
{
        struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
        struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
        char slots[(SCOUTFS_QUORUM_MAX_SLOTS * 3) + 1];
        DECLARE_QUORUM_INFO(sb, qinf);
        struct sockaddr_in other;
        struct sockaddr_in sin;
        int found = 0;
        int ret;
        int i;
        int j;


        for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                if (!quorum_slot_present(super, i))
                        continue;
@@ -997,6 +1099,25 @@ static int verify_quorum_slots(struct super_block *sb)
                return -EINVAL;
        }

        if (!quorum_slot_present(super, opts->quorum_slot_nr)) {
                char *str = slots;
                *str = '\0';
                for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
                        if (quorum_slot_present(super, i)) {
                                ret = snprintf(str, &slots[ARRAY_SIZE(slots)] - str, "%c%u",
                                               str == slots ? ' ' : ',', i);
                                if (ret < 2 || ret > 3) {
                                        scoutfs_err(sb, "error gathering populated slots");
                                        return -EINVAL;
                                }
                                str += ret;
                        }
                }
                scoutfs_err(sb, "quorum_slot_nr=%u option references unused slot, must be one of the following configured slots:%s",
                            opts->quorum_slot_nr, slots);
                return -EINVAL;
        }

/*
 * Always require a majority except in the pathological cases of
 * 1 or 2 members.

@@ -2,12 +2,15 @@
#define _SCOUTFS_QUORUM_H_

int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
void scoutfs_quorum_server_shutdown(struct super_block *sb);
void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term);

u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
                             struct sockaddr_in *sin);

int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term);
int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term);

int scoutfs_quorum_setup(struct super_block *sb);
void scoutfs_quorum_shutdown(struct super_block *sb);
void scoutfs_quorum_destroy(struct super_block *sb);

@@ -16,9 +16,11 @@
#include <linux/sched.h>
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/list_sort.h>

#include "super.h"
#include "recov.h"
#include "cmp.h"

/*
 * There are a few server messages which can't be processed until they
@@ -47,18 +49,41 @@ struct recov_pending {
        int which;
};

static struct recov_pending *find_pending(struct recov_info *recinf, u64 rid, int which)
static struct recov_pending *next_pending(struct recov_info *recinf, u64 rid, int which)
{
        struct recov_pending *pend;

        list_for_each_entry(pend, &recinf->pending, head) {
                if ((rid == 0 || pend->rid == rid) && (pend->which & which))
                if (pend->rid > rid && pend->which & which)
                        return pend;
        }

        return NULL;
}

static struct recov_pending *lookup_pending(struct recov_info *recinf, u64 rid, int which)
{
        struct recov_pending *pend;

        pend = next_pending(recinf, rid - 1, which);
        if (pend && pend->rid == rid)
                return pend;

        return NULL;
}

/*
 * We keep the pending list sorted by rid so that we can iterate over
 * them.  The list should be small and shouldn't be used often.
 */
static int cmp_pending_rid(void *priv, struct list_head *A, struct list_head *B)
{
        struct recov_pending *a = list_entry(A, struct recov_pending, head);
        struct recov_pending *b = list_entry(B, struct recov_pending, head);

        return scoutfs_cmp_u64s(a->rid, b->rid);
}

/*
 * Record that we'll be waiting for a client to recover something.
 * _finished will eventually be called for every _prepare, either
@@ -80,14 +105,15 @@ int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which)

        spin_lock(&recinf->lock);

        pend = find_pending(recinf, rid, SCOUTFS_RECOV_ALL);
        pend = lookup_pending(recinf, rid, SCOUTFS_RECOV_ALL);
        if (pend) {
                pend->which |= which;
        } else {
                swap(pend, alloc);
                pend->rid = rid;
                pend->which = which;
                list_add(&pend->head, &recinf->pending);
                list_add_tail(&pend->head, &recinf->pending);
                list_sort(NULL, &recinf->pending, cmp_pending_rid);
        }

        spin_unlock(&recinf->lock);
@@ -159,7 +185,7 @@ int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which)

        spin_lock(&recinf->lock);

        pend = find_pending(recinf, rid, which);
        pend = lookup_pending(recinf, rid, which);
        if (pend) {
                pend->which &= ~which;
                if (pend->which) {
@@ -190,29 +216,28 @@ bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which)
        bool is_pending;

        spin_lock(&recinf->lock);
        is_pending = find_pending(recinf, rid, which) != NULL;
        is_pending = lookup_pending(recinf, rid, which) != NULL;
        spin_unlock(&recinf->lock);

        return is_pending;
}

/*
 * Returns 0 if there are no rids waiting for the given state to be
 * recovered.  Returns the rid of a client still waiting if there are
 * any, in no specified order.
 * Return the next rid after the given rid of a client waiting for the
 * given state to be recovered.  Start with rid 0, returns 0 when there
 * are no more clients waiting for recovery.
 *
 * This is inherently racey.  Callers are responsible for resolving any
 * actions taken based on pending with the recovery finishing, perhaps
 * before we return.
 */
u64 scoutfs_recov_next_pending(struct super_block *sb, int which)
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which)
{
        DECLARE_RECOV_INFO(sb, recinf);
        struct recov_pending *pend;
        u64 rid;

        spin_lock(&recinf->lock);
        pend = find_pending(recinf, 0, which);
        pend = next_pending(recinf, rid, which);
        rid = pend ? pend->rid : 0;
        spin_unlock(&recinf->lock);

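With the added rid argument, scoutfs_recov_next_pending() becomes a cursor: callers hand back the last rid they saw, starting from 0, and the walk ends when 0 is returned. A minimal userspace model of the pattern, with a sorted array standing in for the rid-sorted pending list:

/* Editor's sketch: userspace model of the cursor-style walk. */
#include <stdint.h>
#include <stdio.h>

static const uint64_t pending_rids[] = { 100, 250, 300 };

static uint64_t next_pending(uint64_t rid)
{
        size_t i;

        for (i = 0; i < sizeof(pending_rids) / sizeof(pending_rids[0]); i++) {
                if (pending_rids[i] > rid)
                        return pending_rids[i];
        }
        return 0;
}

int main(void)
{
        uint64_t rid = 0;

        while ((rid = next_pending(rid)) != 0)
                printf("pending rid %llu\n", (unsigned long long)rid);
        return 0;
}
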
@@ -237,7 +262,7 @@ void scoutfs_recov_shutdown(struct super_block *sb)
        recinf->timeout_fn = NULL;
        spin_unlock(&recinf->lock);

        list_for_each_entry_safe(pend, tmp, &recinf->pending, head) {
        list_for_each_entry_safe(pend, tmp, &list, head) {
                list_del(&pend->head);
                kfree(pend);
        }

@@ -14,7 +14,7 @@ int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_
                        unsigned int timeout_ms);
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which);
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which);
void scoutfs_recov_shutdown(struct super_block *sb);

int scoutfs_recov_setup(struct super_block *sb);

@@ -58,9 +58,6 @@ struct lock_info;
        __entry->pref##_map, \
        __entry->pref##_flags

#define DECLARE_TRACED_EXTENT(name) \
        struct scoutfs_traced_extent name = {0}

DECLARE_EVENT_CLASS(scoutfs_ino_ret_class,
        TP_PROTO(struct super_block *sb, u64 ino, int ret),

@@ -406,32 +403,36 @@ TRACE_EVENT(scoutfs_sync_fs,
);

TRACE_EVENT(scoutfs_trans_write_func,
        TP_PROTO(struct super_block *sb, unsigned long dirty),
        TP_PROTO(struct super_block *sb, u64 dirty_block_bytes, u64 dirty_item_pages),

        TP_ARGS(sb, dirty),
        TP_ARGS(sb, dirty_block_bytes, dirty_item_pages),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(unsigned long, dirty)
                __field(__u64, dirty_block_bytes)
                __field(__u64, dirty_item_pages)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->dirty = dirty;
                __entry->dirty_block_bytes = dirty_block_bytes;
                __entry->dirty_item_pages = dirty_item_pages;
        ),

        TP_printk(SCSBF" dirty %lu", SCSB_TRACE_ARGS, __entry->dirty)
        TP_printk(SCSBF" dirty_block_bytes %llu dirty_item_pages %llu",
                  SCSB_TRACE_ARGS, __entry->dirty_block_bytes, __entry->dirty_item_pages)
);

DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
        TP_PROTO(struct super_block *sb, void *journal_info, int holders),
        TP_PROTO(struct super_block *sb, void *journal_info, int holders, int ret),

        TP_ARGS(sb, journal_info, holders),
        TP_ARGS(sb, journal_info, holders, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(unsigned long, journal_info)
                __field(int, holders)
                __field(int, ret)
        ),

        TP_fast_assign(
@@ -440,17 +441,17 @@ DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
                __entry->holders = holders;
        ),

        TP_printk(SCSBF" journal_info 0x%0lx holders %d",
                  SCSB_TRACE_ARGS, __entry->journal_info, __entry->holders)
        TP_printk(SCSBF" journal_info 0x%0lx holders %d ret %d",
                  SCSB_TRACE_ARGS, __entry->journal_info, __entry->holders, __entry->ret)
);

DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_trans_acquired_hold,
        TP_PROTO(struct super_block *sb, void *journal_info, int holders),
        TP_ARGS(sb, journal_info, holders)
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_hold_trans,
        TP_PROTO(struct super_block *sb, void *journal_info, int holders, int ret),
        TP_ARGS(sb, journal_info, holders, ret)
);
DEFINE_EVENT(scoutfs_trans_hold_release_class, scoutfs_release_trans,
        TP_PROTO(struct super_block *sb, void *journal_info, int holders),
        TP_ARGS(sb, journal_info, holders)
        TP_PROTO(struct super_block *sb, void *journal_info, int holders, int ret),
        TP_ARGS(sb, journal_info, holders, ret)
);

TRACE_EVENT(scoutfs_ioc_release,
@@ -985,22 +986,6 @@ TRACE_EVENT(scoutfs_delete_inode,
                  __entry->mode, __entry->size)
);

TRACE_EVENT(scoutfs_scan_orphans,
        TP_PROTO(struct super_block *sb),

        TP_ARGS(sb),

        TP_STRUCT__entry(
                __field(dev_t, dev)
        ),

        TP_fast_assign(
                __entry->dev = sb->s_dev;
        ),

        TP_printk("dev %d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(scoutfs_key_class,
        TP_PROTO(struct super_block *sb, struct scoutfs_key *key),
        TP_ARGS(sb, key),
@@ -1644,6 +1629,164 @@ TRACE_EVENT(scoutfs_btree_walk,
                  __entry->level, __entry->ref_blkno, __entry->ref_seq)
);

TRACE_EVENT(scoutfs_btree_set_parent,
        TP_PROTO(struct super_block *sb,
                 struct scoutfs_btree_root *root, struct scoutfs_key *key,
                 struct scoutfs_btree_root *par_root),

        TP_ARGS(sb, root, key, par_root),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, root_blkno)
                __field(__u64, root_seq)
                __field(__u8, root_height)
                sk_trace_define(key)
                __field(__u64, par_root_blkno)
                __field(__u64, par_root_seq)
                __field(__u8, par_root_height)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->root_blkno = le64_to_cpu(root->ref.blkno);
                __entry->root_seq = le64_to_cpu(root->ref.seq);
                __entry->root_height = root->height;
                sk_trace_assign(key, key);
                __entry->par_root_blkno = le64_to_cpu(par_root->ref.blkno);
                __entry->par_root_seq = le64_to_cpu(par_root->ref.seq);
                __entry->par_root_height = par_root->height;
        ),

        TP_printk(SCSBF" root blkno %llu seq %llu height %u, key "SK_FMT", par_root blkno %llu seq %llu height %u",
                  SCSB_TRACE_ARGS, __entry->root_blkno, __entry->root_seq,
                  __entry->root_height, sk_trace_args(key),
                  __entry->par_root_blkno, __entry->par_root_seq,
                  __entry->par_root_height)
);

TRACE_EVENT(scoutfs_btree_merge,
        TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
                 struct scoutfs_key *start, struct scoutfs_key *end),

        TP_ARGS(sb, root, start, end),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, root_blkno)
                __field(__u64, root_seq)
                __field(__u8, root_height)
                sk_trace_define(start)
                sk_trace_define(end)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->root_blkno = le64_to_cpu(root->ref.blkno);
                __entry->root_seq = le64_to_cpu(root->ref.seq);
                __entry->root_height = root->height;
                sk_trace_assign(start, start);
                sk_trace_assign(end, end);
        ),

        TP_printk(SCSBF" root blkno %llu seq %llu height %u start "SK_FMT" end "SK_FMT,
                  SCSB_TRACE_ARGS, __entry->root_blkno, __entry->root_seq,
                  __entry->root_height, sk_trace_args(start),
                  sk_trace_args(end))
);

TRACE_EVENT(scoutfs_btree_merge_items,
        TP_PROTO(struct super_block *sb,
                 struct scoutfs_btree_root *m_root,
                 struct scoutfs_key *m_key, int m_val_len,
                 struct scoutfs_btree_root *f_root,
                 struct scoutfs_key *f_key, int f_val_len,
                 int is_del),

        TP_ARGS(sb, m_root, m_key, m_val_len, f_root, f_key, f_val_len, is_del),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, m_root_blkno)
                __field(__u64, m_root_seq)
                __field(__u8, m_root_height)
                sk_trace_define(m_key)
                __field(int, m_val_len)
                __field(__u64, f_root_blkno)
                __field(__u64, f_root_seq)
                __field(__u8, f_root_height)
                sk_trace_define(f_key)
                __field(int, f_val_len)
                __field(int, is_del)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->m_root_blkno = m_root ?
                                        le64_to_cpu(m_root->ref.blkno) : 0;
                __entry->m_root_seq = m_root ? le64_to_cpu(m_root->ref.seq) : 0;
                __entry->m_root_height = m_root ? m_root->height : 0;
                sk_trace_assign(m_key, m_key);
                __entry->m_val_len = m_val_len;
                __entry->f_root_blkno = f_root ?
                                        le64_to_cpu(f_root->ref.blkno) : 0;
                __entry->f_root_seq = f_root ? le64_to_cpu(f_root->ref.seq) : 0;
                __entry->f_root_height = f_root ? f_root->height : 0;
                sk_trace_assign(f_key, f_key);
                __entry->f_val_len = f_val_len;
                __entry->is_del = !!is_del;
        ),

        TP_printk(SCSBF" merge item root blkno %llu seq %llu height %u key "SK_FMT" val_len %d, fs item root blkno %llu seq %llu height %u key "SK_FMT" val_len %d, is_del %d",
                  SCSB_TRACE_ARGS, __entry->m_root_blkno, __entry->m_root_seq,
                  __entry->m_root_height, sk_trace_args(m_key),
                  __entry->m_val_len, __entry->f_root_blkno,
                  __entry->f_root_seq, __entry->f_root_height,
                  sk_trace_args(f_key), __entry->f_val_len, __entry->is_del)
);

DECLARE_EVENT_CLASS(scoutfs_btree_free_blocks,
        TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
                 u64 blkno),

        TP_ARGS(sb, root, blkno),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, root_blkno)
                __field(__u64, root_seq)
                __field(__u8, root_height)
                __field(__u64, blkno)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->root_blkno = le64_to_cpu(root->ref.blkno);
                __entry->root_seq = le64_to_cpu(root->ref.seq);
                __entry->root_height = root->height;
                __entry->blkno = blkno;
        ),

        TP_printk(SCSBF" root blkno %llu seq %llu height %u, free blkno %llu",
                  SCSB_TRACE_ARGS, __entry->root_blkno, __entry->root_seq,
                  __entry->root_height, __entry->blkno)
);
DEFINE_EVENT(scoutfs_btree_free_blocks, scoutfs_btree_free_blocks_single,
        TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
                 u64 blkno),
        TP_ARGS(sb, root, blkno)
);
DEFINE_EVENT(scoutfs_btree_free_blocks, scoutfs_btree_free_blocks_leaf,
        TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
                 u64 blkno),
        TP_ARGS(sb, root, blkno)
);
DEFINE_EVENT(scoutfs_btree_free_blocks, scoutfs_btree_free_blocks_parent,
        TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *root,
                 u64 blkno),
        TP_ARGS(sb, root, blkno)
);

TRACE_EVENT(scoutfs_online_offline_blocks,
        TP_PROTO(struct inode *inode, s64 on_delta, s64 off_delta,
                 u64 on_now, u64 off_now),
@@ -1811,74 +1954,6 @@ TRACE_EVENT(scoutfs_quorum_loop,
                  __entry->timeout_sec, __entry->timeout_nsec)
);

/*
 * We can emit trace events to make it easier to synchronize the
 * monotonic clocks in trace logs between nodes.  By looking at the send
 * and recv times of many messages flowing between nodes we can get
 * surprisingly good estimates of the clock offset between them.
 */
DECLARE_EVENT_CLASS(scoutfs_clock_sync_class,
        TP_PROTO(__le64 clock_sync_id),
        TP_ARGS(clock_sync_id),
        TP_STRUCT__entry(
                __field(__u64, clock_sync_id)
        ),
        TP_fast_assign(
                __entry->clock_sync_id = le64_to_cpu(clock_sync_id);
        ),
        TP_printk("clock_sync_id %016llx", __entry->clock_sync_id)
);
DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_send_clock_sync,
        TP_PROTO(__le64 clock_sync_id),
        TP_ARGS(clock_sync_id)
);
DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_recv_clock_sync,
        TP_PROTO(__le64 clock_sync_id),
        TP_ARGS(clock_sync_id)
);

TRACE_EVENT(scoutfs_trans_seq_advance,
        TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),

        TP_ARGS(sb, rid, trans_seq),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, s_rid)
                __field(__u64, trans_seq)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->s_rid = rid;
                __entry->trans_seq = trans_seq;
        ),

        TP_printk(SCSBF" rid %016llx trans_seq %llu\n",
                  SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
);

TRACE_EVENT(scoutfs_trans_seq_remove,
        TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),

        TP_ARGS(sb, rid, trans_seq),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, s_rid)
                __field(__u64, trans_seq)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->s_rid = rid;
                __entry->trans_seq = trans_seq;
        ),

        TP_printk(SCSBF" rid %016llx trans_seq %llu",
                  SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
);

TRACE_EVENT(scoutfs_trans_seq_last,
        TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),

@@ -1900,6 +1975,114 @@ TRACE_EVENT(scoutfs_trans_seq_last,
                  SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
);

TRACE_EVENT(scoutfs_get_log_merge_status,
        TP_PROTO(struct super_block *sb, u64 rid, struct scoutfs_key *next_range_key,
                 u64 nr_requests, u64 nr_complete, u64 seq),

        TP_ARGS(sb, rid, next_range_key, nr_requests, nr_complete, seq),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, s_rid)
                sk_trace_define(next_range_key)
                __field(__u64, nr_requests)
                __field(__u64, nr_complete)
                __field(__u64, seq)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->s_rid = rid;
                sk_trace_assign(next_range_key, next_range_key);
                __entry->nr_requests = nr_requests;
                __entry->nr_complete = nr_complete;
                __entry->seq = seq;
        ),

        TP_printk(SCSBF" rid %016llx next_range_key "SK_FMT" nr_requests %llu nr_complete %llu seq %llu",
                  SCSB_TRACE_ARGS, __entry->s_rid, sk_trace_args(next_range_key),
                  __entry->nr_requests, __entry->nr_complete, __entry->seq)
);

TRACE_EVENT(scoutfs_get_log_merge_request,
        TP_PROTO(struct super_block *sb, u64 rid,
                 struct scoutfs_btree_root *root, struct scoutfs_key *start,
                 struct scoutfs_key *end, u64 input_seq, u64 seq),

        TP_ARGS(sb, rid, root, start, end, input_seq, seq),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, s_rid)
                __field(__u64, root_blkno)
                __field(__u64, root_seq)
                __field(__u8, root_height)
                sk_trace_define(start)
                sk_trace_define(end)
                __field(__u64, input_seq)
                __field(__u64, seq)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->s_rid = rid;
                __entry->root_blkno = le64_to_cpu(root->ref.blkno);
                __entry->root_seq = le64_to_cpu(root->ref.seq);
                __entry->root_height = root->height;
                sk_trace_assign(start, start);
                sk_trace_assign(end, end);
                __entry->input_seq = input_seq;
                __entry->seq = seq;
        ),

        TP_printk(SCSBF" rid %016llx root blkno %llu seq %llu height %u start "SK_FMT" end "SK_FMT" input_seq %llu seq %llu",
                  SCSB_TRACE_ARGS, __entry->s_rid, __entry->root_blkno,
                  __entry->root_seq, __entry->root_height,
                  sk_trace_args(start), sk_trace_args(end), __entry->input_seq,
                  __entry->seq)
);

TRACE_EVENT(scoutfs_get_log_merge_complete,
        TP_PROTO(struct super_block *sb, u64 rid,
                 struct scoutfs_btree_root *root, struct scoutfs_key *start,
                 struct scoutfs_key *end, struct scoutfs_key *remain,
                 u64 seq, u64 flags),

        TP_ARGS(sb, rid, root, start, end, remain, seq, flags),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, s_rid)
                __field(__u64, root_blkno)
                __field(__u64, root_seq)
                __field(__u8, root_height)
                sk_trace_define(start)
                sk_trace_define(end)
                sk_trace_define(remain)
                __field(__u64, seq)
                __field(__u64, flags)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->s_rid = rid;
                __entry->root_blkno = le64_to_cpu(root->ref.blkno);
                __entry->root_seq = le64_to_cpu(root->ref.seq);
                __entry->root_height = root->height;
                sk_trace_assign(start, start);
                sk_trace_assign(end, end);
                sk_trace_assign(remain, remain);
                __entry->seq = seq;
                __entry->flags = flags;
        ),

        TP_printk(SCSBF" rid %016llx root blkno %llu seq %llu height %u start "SK_FMT" end "SK_FMT" remain "SK_FMT" seq %llu flags 0x%llx",
                  SCSB_TRACE_ARGS, __entry->s_rid, __entry->root_blkno,
                  __entry->root_seq, __entry->root_height,
                  sk_trace_args(start), sk_trace_args(end),
                  sk_trace_args(remain), __entry->seq, __entry->flags)
);

DECLARE_EVENT_CLASS(scoutfs_forest_bloom_class,
        TP_PROTO(struct super_block *sb, struct scoutfs_key *key,
                 u64 rid, u64 nr, u64 blkno, u64 seq, unsigned int count),
@@ -2358,6 +2541,36 @@ TRACE_EVENT(scoutfs_alloc_move,
                  __entry->ret)
);

DECLARE_EVENT_CLASS(scoutfs_alloc_extent_class,
        TP_PROTO(struct super_block *sb, struct scoutfs_extent *ext),

        TP_ARGS(sb, ext),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                STE_FIELDS(ext)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                STE_ASSIGN(ext, ext);
        ),

        TP_printk(SCSBF" ext "STE_FMT, SCSB_TRACE_ARGS, STE_ENTRY_ARGS(ext))
);
DEFINE_EVENT(scoutfs_alloc_extent_class, scoutfs_alloc_move_extent,
        TP_PROTO(struct super_block *sb, struct scoutfs_extent *ext),
        TP_ARGS(sb, ext)
);
DEFINE_EVENT(scoutfs_alloc_extent_class, scoutfs_alloc_fill_extent,
        TP_PROTO(struct super_block *sb, struct scoutfs_extent *ext),
        TP_ARGS(sb, ext)
);
DEFINE_EVENT(scoutfs_alloc_extent_class, scoutfs_alloc_empty_extent,
        TP_PROTO(struct super_block *sb, struct scoutfs_extent *ext),
        TP_ARGS(sb, ext)
);

TRACE_EVENT(scoutfs_item_read_page,
        TP_PROTO(struct super_block *sb, struct scoutfs_key *key,
                 struct scoutfs_key *pg_start, struct scoutfs_key *pg_end),

2737 kmod/src/server.c (file diff suppressed because it is too large)
@@ -56,13 +56,15 @@ do { \
        __entry->name##_data_len, __entry->name##_cmd, __entry->name##_flags, \
        __entry->name##_error

u64 scoutfs_server_reserved_meta_blocks(struct super_block *sb);

int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
                                struct scoutfs_net_lock *nl);
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
                                 struct scoutfs_net_lock *nl);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
                                        struct scoutfs_key *key);
int scoutfs_server_hold_commit(struct super_block *sb);
void scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);

@@ -71,8 +73,10 @@ int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
                                      struct scoutfs_open_ino_map *map, int err);

struct sockaddr_in;
struct scoutfs_quorum_elected_info;
u64 scoutfs_server_seq(struct super_block *sb);
u64 scoutfs_server_next_seq(struct super_block *sb);
void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);

int scoutfs_server_start(struct super_block *sb, u64 term);
void scoutfs_server_abort(struct super_block *sb);
void scoutfs_server_stop(struct super_block *sb);

@@ -28,6 +28,7 @@
#include "btree.h"
#include "spbm.h"
#include "client.h"
#include "counters.h"
#include "scoutfs_trace.h"

/*
@@ -989,12 +990,13 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
                            struct scoutfs_alloc *alloc,
                            struct scoutfs_block_writer *wri,
                            struct scoutfs_btree_root *root,
                            struct scoutfs_srch_file *sfl)
                            struct scoutfs_srch_file *sfl, bool force)
{
        struct scoutfs_key key;
        int ret;

        if (le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT)
        if (sfl->ref.blkno == 0 ||
            (!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
                return 0;

        init_srch_key(&key, SCOUTFS_SRCH_LOG_TYPE,
@@ -1480,10 +1482,11 @@ static int kway_merge(struct super_block *sb,
        int ind;
        int i;

        if (WARN_ON_ONCE(nr <= 1))
        if (WARN_ON_ONCE(nr <= 0))
                return -EINVAL;

        nr_parents = roundup_pow_of_two(nr) - 1;
        /* always at least one parent for single leaf */
        nr_parents = max_t(unsigned long, 1, roundup_pow_of_two(nr) - 1);
        /* root at [1] for easy sib/parent index calc, final pad for odd sib */
        nr_nodes = 1 + nr_parents + nr + 1;
        tnodes = __vmalloc(nr_nodes * sizeof(struct tourn_node),
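
The clamp above exists for the nr == 1 case that the relaxed WARN_ON_ONCE now admits: roundup_pow_of_two(1) - 1 would give zero parents and no root slot. A small userspace sketch of the sizing arithmetic, with roundup_pow_of_two modeled for small values:

/* Editor's sketch: tournament-tree sizing for small nr. */
#include <stdio.h>

static unsigned long rpot(unsigned long n)   /* stand-in for roundup_pow_of_two() */
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long nr, nr_parents, nr_nodes;

        for (nr = 1; nr <= 4; nr++) {
                nr_parents = rpot(nr) - 1;
                if (nr_parents < 1)
                        nr_parents = 1;          /* the new clamp */
                /* root at [1], plus a final pad for an odd sibling */
                nr_nodes = 1 + nr_parents + nr + 1;
                printf("nr %lu -> parents %lu nodes %lu\n",
                       nr, nr_parents, nr_nodes);
        }
        return 0;
}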
@@ -2080,7 +2083,7 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
                        struct scoutfs_block_writer *wri,
                        struct scoutfs_srch_compact *sc)
{
        int ret;
        int ret = 0;
        int i;

        for (i = 0; i < sc->nr; i++) {
@@ -2126,6 +2129,7 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
        struct scoutfs_alloc alloc;
        unsigned long delay;
        int ret;
        int err;

        sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
        if (sc == NULL) {
@@ -2164,10 +2168,14 @@ commit:
        sc->meta_freed = alloc.freed;
        sc->flags |= ret < 0 ? SCOUTFS_SRCH_COMPACT_FLAG_ERROR : 0;

        ret = scoutfs_client_srch_commit_compact(sb, sc);
        err = scoutfs_client_srch_commit_compact(sb, sc);
        if (err < 0 && ret == 0)
                ret = err;
out:
        /* our allocators and files should be stable */
        WARN_ON_ONCE(ret == -ESTALE);
        if (ret < 0)
                scoutfs_inc_counter(sb, srch_compact_error);

        scoutfs_block_writer_forget_all(sb, &wri);
        if (!atomic_read(&srinf->shutdown)) {

@@ -37,7 +37,7 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
                            struct scoutfs_alloc *alloc,
                            struct scoutfs_block_writer *wri,
                            struct scoutfs_btree_root *root,
                            struct scoutfs_srch_file *sfl);
                            struct scoutfs_srch_file *sfl, bool force);
int scoutfs_srch_get_compact(struct super_block *sb,
                             struct scoutfs_alloc *alloc,
                             struct scoutfs_block_writer *wri,

268 kmod/src/super.c
@@ -20,7 +20,6 @@
 #include <linux/statfs.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
-#include <linux/percpu.h>

 #include "super.h"
 #include "block.h"
@@ -46,70 +45,40 @@
 #include "alloc.h"
 #include "recov.h"
 #include "omap.h"
+#include "volopt.h"
+#include "fence.h"
 #include "scoutfs_trace.h"

 static struct dentry *scoutfs_debugfs_root;

-static DEFINE_PER_CPU(u64, clock_sync_ids) = 0;
-
-/*
- * Give the caller a unique clock sync id for a message they're about to
- * send.  We make the ids reasonably globally unique by using randomly
- * initialized per-cpu 64bit counters.
- */
-__le64 scoutfs_clock_sync_id(void)
+/* the statfs file fields can be small (and signed?) :/ */
+static __statfs_word saturate_truncated_word(u64 files)
 {
-	u64 rnd = 0;
-	u64 ret;
-	u64 *id;
+	__statfs_word word = files;

-retry:
-	preempt_disable();
-	id = this_cpu_ptr(&clock_sync_ids);
-	if (*id == 0) {
-		if (rnd == 0) {
-			preempt_enable();
-			get_random_bytes(&rnd, sizeof(rnd));
-			goto retry;
-		}
-		*id = rnd;
+	if (word != files) {
+		word = ~0ULL;
+		if (word < 0)
+			word = (unsigned long)word >> 1;
	}

-	ret = ++(*id);
-	preempt_enable();
-
-	return cpu_to_le64(ret);
-}
-
-struct statfs_free_blocks {
-	u64 meta;
-	u64 data;
-};
-
-static int count_free_blocks(struct super_block *sb, void *arg, int owner,
-			     u64 id, bool meta, bool avail, u64 blocks)
-{
-	struct statfs_free_blocks *sfb = arg;
-
-	if (meta)
-		sfb->meta += blocks;
-	else
-		sfb->data += blocks;
-
-	return 0;
+	return word;
 }

 /*
- * Build the free block counts by having alloc read all the persistent
- * blocks which contain allocators and calling us for each of them.
- * Only the super block reads aren't cached so repeatedly calling statfs
- * is like repeated O_DIRECT IO.  We can add a cache and stale results
- * if that IO becomes a problem.
+ * The server gives us the current sum of free blocks and the total
+ * inode count that it can see across all the clients' log trees.  It
+ * won't see allocations and inode creations or deletions that are dirty
+ * in client memory as it builds a transaction.
  *
- * We fake the number of free inodes value by assuming that we can fill
- * free blocks with a certain number of inodes.  We then the number of
- * current inodes to that free count to determine the total possible
- * inodes.
+ * We don't have static limits on the number of files so the statfs
+ * fields for the total possible files and the number free isn't
+ * particularly helpful.  What we do want to report is the number of
+ * inodes, so we fake a max possible number of inodes given a
+ * conservative estimate of the total space consumption per file and
+ * then find the free by subtracting our precise count of active inodes.
+ * This seems like the least surprising compromise where the file max
+ * doesn't change and the caller gets the correct count of used inodes.
  *
  * The fsid that we report is constructed from the xor of the first two
  * and second two little endian u32s that make up the uuid bytes.
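saturate_truncated_word() guards against __statfs_word being narrower than u64 and possibly signed. A userspace model with an assumed signed 32-bit word shows the clamp landing on the largest positive value the field can hold:

#include <stdint.h>
#include <stdio.h>

typedef int32_t statfs_word;	/* assuming a signed 32-bit ABI here */

static statfs_word saturate(uint64_t files)
{
	statfs_word word = (statfs_word)files;

	if ((uint64_t)word != files) {
		word = (statfs_word)~0ULL;	/* all bits set */
		if (word < 0)			/* signed: shift down to max */
			word = (statfs_word)(((uint32_t)word) >> 1);
	}
	return word;
}

int main(void)
{
	printf("%d\n", saturate(1ULL << 40));	/* clamps to INT32_MAX */
	printf("%d\n", saturate(1000));		/* fits, passes through */
	return 0;
}
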
@@ -117,41 +86,33 @@ static int count_free_blocks(struct super_block *sb, void *arg, int owner,
 static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
 {
	struct super_block *sb = dentry->d_inode->i_sb;
-	struct scoutfs_super_block *super = NULL;
-	struct statfs_free_blocks sfb = {0,};
+	struct scoutfs_net_statfs nst;
+	u64 files;
+	u64 ffree;
	__le32 uuid[4];
	int ret;

	scoutfs_inc_counter(sb, statfs);

-	super = kzalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
-	if (!super) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = scoutfs_read_super(sb, super);
+	ret = scoutfs_client_statfs(sb, &nst);
	if (ret)
		goto out;

-	ret = scoutfs_alloc_foreach(sb, count_free_blocks, &sfb);
-	if (ret < 0)
-		goto out;
-
-	kst->f_bfree = (sfb.meta << SCOUTFS_BLOCK_SM_LG_SHIFT) + sfb.data;
+	kst->f_bfree = (le64_to_cpu(nst.free_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
+		       le64_to_cpu(nst.free_data_blocks);
	kst->f_type = SCOUTFS_SUPER_MAGIC;
	kst->f_bsize = SCOUTFS_BLOCK_SM_SIZE;
-	kst->f_blocks = (le64_to_cpu(super->total_meta_blocks) <<
-			 SCOUTFS_BLOCK_SM_LG_SHIFT) +
-			le64_to_cpu(super->total_data_blocks);
+	kst->f_blocks = (le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
+			le64_to_cpu(nst.total_data_blocks);
	kst->f_bavail = kst->f_bfree;

-	/* arbitrarily assume ~1K / empty file */
-	kst->f_ffree = sfb.meta * (SCOUTFS_BLOCK_LG_SIZE / 1024);
-	kst->f_files = kst->f_ffree + le64_to_cpu(super->next_ino);
+	files = div_u64(le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_LG_SHIFT, 2048);
+	ffree = files - le64_to_cpu(nst.inode_count);
+	kst->f_files = saturate_truncated_word(files);
+	kst->f_ffree = saturate_truncated_word(ffree);

-	BUILD_BUG_ON(sizeof(uuid) != sizeof(super->uuid));
-	memcpy(uuid, super->uuid, sizeof(uuid));
+	BUILD_BUG_ON(sizeof(uuid) != sizeof(nst.uuid));
+	memcpy(uuid, nst.uuid, sizeof(uuid));
	kst->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[1]);
	kst->f_fsid.val[1] = le32_to_cpu(uuid[2]) ^ le32_to_cpu(uuid[3]);
	kst->f_namelen = SCOUTFS_NAME_LEN;
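The f_files estimate reduces to simple arithmetic: total metadata bytes over an assumed 2048 bytes per file, minus the server's precise inode count for f_ffree. A worked example with made-up numbers and an assumed 64KiB large-block shift:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_LG_SHIFT 16	/* assuming 64KiB large metadata blocks */

int main(void)
{
	uint64_t total_meta_blocks = 1024 * 1024;	/* 64GiB of metadata */
	uint64_t inode_count = 123456;
	uint64_t files = (total_meta_blocks << BLOCK_LG_SHIFT) / 2048;
	uint64_t ffree = files - inode_count;

	printf("files %llu ffree %llu\n",
	       (unsigned long long)files, (unsigned long long)ffree);
	return 0;
}
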
@@ -160,8 +121,6 @@ static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
	/* the vfs fills f_flags */
	ret = 0;
 out:
-	kfree(super);

 /*
  * We don't take cluster locks in statfs which makes it a very
  * convenient place to trigger lock reclaim for debugging.  We
@@ -228,7 +187,15 @@ static void scoutfs_metadev_close(struct super_block *sb)
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (sbi->meta_bdev) {
+		/*
+		 * Some kernels have blkdev_reread_part which calls
+		 * fsync_bdev while holding the bd_mutex which inverts
+		 * the s_umount hold in deactivate_super and blkdev_put
+		 * from kill_sb->put_super.
+		 */
+		lockdep_off();
		blkdev_put(sbi->meta_bdev, SCOUTFS_META_BDEV_MODE);
+		lockdep_on();
		sbi->meta_bdev = NULL;
	}
 }
@@ -245,14 +212,23 @@ static void scoutfs_put_super(struct super_block *sb)

	trace_scoutfs_put_super(sb);

-	scoutfs_srch_destroy(sb);
+	/*
+	 * Wait for invalidation and iput to finish with any lingering
+	 * inode references that escaped the evict_inodes in
+	 * generic_shutdown_super.  MS_ACTIVE is clear so final iput
+	 * will always evict.
+	 */
+	scoutfs_lock_flush_invalidate(sb);
+	scoutfs_inode_flush_iput(sb);
+	WARN_ON_ONCE(!list_empty(&sb->s_inodes));
+
	scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
	sbi->rid_lock = NULL;
	scoutfs_forest_stop(sb);
+	scoutfs_srch_destroy(sb);

	scoutfs_lock_shutdown(sb);

	scoutfs_shutdown_trans(sb);
+	scoutfs_volopt_destroy(sb);
	scoutfs_client_destroy(sb);
	scoutfs_inode_destroy(sb);
	scoutfs_item_destroy(sb);
@@ -268,6 +244,7 @@ static void scoutfs_put_super(struct super_block *sb)

	scoutfs_block_destroy(sb);
	scoutfs_destroy_triggers(sb);
+	scoutfs_fence_destroy(sb);
	scoutfs_options_destroy(sb);
	scoutfs_sysfs_destroy_attrs(sb, &sbi->mopts_ssa);
	debugfs_remove(sbi->debug_root);
@@ -281,6 +258,23 @@ static void scoutfs_put_super(struct super_block *sb)
	sb->s_fs_info = NULL;
 }

+/*
+ * Record that we're performing a forced unmount.  As put_super drives
+ * destruction of the filesystem we won't issue more network or storage
+ * operations because we assume that they'll hang.  Pending operations
+ * can return errors when it's possible to do so.  We may be racing with
+ * pending operations which can't be canceled.
+ */
+static void scoutfs_umount_begin(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+
+	scoutfs_warn(sb, "forcing unmount, can return errors and lose unsynced data");
+	sbi->forced_unmount = true;
+
+	scoutfs_client_net_shutdown(sb);
+}
+
 static const struct super_operations scoutfs_super_ops = {
	.alloc_inode = scoutfs_alloc_inode,
	.drop_inode = scoutfs_drop_inode,
@@ -290,6 +284,7 @@ static const struct super_operations scoutfs_super_ops = {
	.statfs = scoutfs_statfs,
	.show_options = scoutfs_show_options,
	.put_super = scoutfs_put_super,
+	.umount_begin = scoutfs_umount_begin,
 };

 /*
@@ -309,28 +304,16 @@ int scoutfs_write_super(struct super_block *sb,
			sizeof(struct scoutfs_super_block));
 }

-static bool invalid_blkno_limits(struct super_block *sb, char *which,
-				 u64 start, __le64 first, __le64 last,
-				 struct block_device *bdev, int shift)
+static bool small_bdev(struct super_block *sb, char *which, u64 blocks,
+		       struct block_device *bdev, int shift)
 {
-	u64 blkno;
+	u64 size = (u64)i_size_read(bdev->bd_inode);
+	u64 count = size >> shift;

-	if (le64_to_cpu(first) < start) {
-		scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
-			    which, le64_to_cpu(first), start);
-		return true;
-	}
+	if (blocks > count) {
+		scoutfs_err(sb, "super block records %llu %s blocks, but device %u:%u size %llu only allows %llu blocks",
+			    blocks, which, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev), size, count);
+		return true;
+	}

-	if (le64_to_cpu(first) > le64_to_cpu(last)) {
-		scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
-			    which, le64_to_cpu(first), which, le64_to_cpu(last));
-		return true;
-	}
-
-	blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
-	if (le64_to_cpu(last) > blkno) {
-		scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
-			    which, le64_to_cpu(last), blkno);
-		return true;
-	}

@@ -379,27 +362,32 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
		goto out;
	}

+	if (le64_to_cpu(super->fmt_vers) < SCOUTFS_FORMAT_VERSION_MIN ||
+	    le64_to_cpu(super->fmt_vers) > SCOUTFS_FORMAT_VERSION_MAX) {
+		scoutfs_err(sb, "super block has format version %llu outside of supported version range %u-%u",
+			    le64_to_cpu(super->fmt_vers), SCOUTFS_FORMAT_VERSION_MIN,
+			    SCOUTFS_FORMAT_VERSION_MAX);
+		ret = -EINVAL;
+		goto out;
+	}
+
-	if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
-		scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
-			    le64_to_cpu(super->version),
-			    SCOUTFS_INTEROP_VERSION);
+	/*
+	 * fill_supers checks the fmt_vers in both supers and then decides to use it.
+	 * From then on we verify that the supers we read have that version.
+	 */
+	if (sbi->fmt_vers != 0 && le64_to_cpu(super->fmt_vers) != sbi->fmt_vers) {
+		scoutfs_err(sb, "super block has format version %llu than %llu read at mount",
+			    le64_to_cpu(super->fmt_vers), sbi->fmt_vers);
		ret = -EINVAL;
		goto out;
	}

	/* XXX do we want more rigorous invalid super checking? */

-	if (invalid_blkno_limits(sb, "meta",
-				 SCOUTFS_META_DEV_START_BLKNO,
-				 super->first_meta_blkno,
-				 super->last_meta_blkno, sbi->meta_bdev,
-				 SCOUTFS_BLOCK_LG_SHIFT) ||
-	    invalid_blkno_limits(sb, "data",
-				 SCOUTFS_DATA_DEV_START_BLKNO,
-				 super->first_data_blkno,
-				 super->last_data_blkno, sb->s_bdev,
-				 SCOUTFS_BLOCK_SM_SHIFT)) {
+	if (small_bdev(sb, "metadata", le64_to_cpu(super->total_meta_blocks), sbi->meta_bdev,
+		       SCOUTFS_BLOCK_LG_SHIFT) ||
+	    small_bdev(sb, "data", le64_to_cpu(super->total_data_blocks), sb->s_bdev,
+		       SCOUTFS_BLOCK_SM_SHIFT)) {
		ret = -EINVAL;
	}

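The small_bdev() test boils down to comparing the super's block count against what the device size allows. A worked example, assuming a 64KiB metadata block shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 10ULL << 30;	/* 10GiB device */
	int shift = 16;			/* assuming 64KiB metadata blocks */
	uint64_t count = size >> shift;	/* 163840 blocks fit */
	uint64_t blocks = 200000;	/* super claims too many */

	if (blocks > count)
		printf("%llu blocks > %llu available\n",
		       (unsigned long long)blocks,
		       (unsigned long long)count);
	return 0;
}
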
@@ -506,6 +494,14 @@ static int scoutfs_read_supers(struct super_block *sb)
		goto out;
	}

+	if (le64_to_cpu(meta_super->fmt_vers) != le64_to_cpu(data_super->fmt_vers)) {
+		scoutfs_err(sb, "meta device format version %llu != data device format version %llu",
+			    le64_to_cpu(meta_super->fmt_vers), le64_to_cpu(data_super->fmt_vers));
+		goto out;
+	}
+
+	sbi->fmt_vers = le64_to_cpu(meta_super->fmt_vers);
	sbi->super = *meta_super;
 out:
	kfree(meta_super);
@@ -527,6 +523,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &scoutfs_super_ops;
+	sb->s_export_op = &scoutfs_export_ops;
	sb->s_flags |= MS_I_VERSION;

	/* btree blocks use long lived bh->b_data refs */
	mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);
@@ -542,12 +539,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		return ret;

	spin_lock_init(&sbi->next_ino_lock);
-	init_waitqueue_head(&sbi->trans_hold_wq);
	spin_lock_init(&sbi->data_wait_root.lock);
	sbi->data_wait_root.root = RB_ROOT;
-	spin_lock_init(&sbi->trans_write_lock);
-	INIT_DELAYED_WORK(&sbi->trans_write_work, scoutfs_trans_write_func);
-	init_waitqueue_head(&sbi->trans_write_wq);
	scoutfs_sysfs_init_attrs(sb, &sbi->mopts_ssa);

	ret = scoutfs_parse_options(sb, data, &opts);
@@ -588,6 +581,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	      scoutfs_sysfs_create_attrs(sb, &sbi->mopts_ssa,
					 mount_options_attrs, "mount_options") ?:
	      scoutfs_setup_triggers(sb) ?:
+	      scoutfs_fence_setup(sb) ?:
	      scoutfs_block_setup(sb) ?:
	      scoutfs_forest_setup(sb) ?:
	      scoutfs_item_setup(sb) ?:
@@ -601,16 +595,17 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	      scoutfs_server_setup(sb) ?:
	      scoutfs_quorum_setup(sb) ?:
	      scoutfs_client_setup(sb) ?:
	      scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
			       &sbi->rid_lock) ?:
-	      scoutfs_trans_get_log_trees(sb) ?:
+	      scoutfs_volopt_setup(sb) ?:
	      scoutfs_srch_setup(sb);
	if (ret)
		goto out;

-	inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO);
+	/* this interruptible iget lets hung mount be aborted with ctl-c */
+	inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO, SCOUTFS_LKF_INTERRUPTIBLE, 0);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
		goto out;
	}

@@ -620,12 +615,15 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}

-	ret = scoutfs_client_advance_seq(sb, &sbi->trans_seq);
+	/* send requests once iget progress shows we had a server */
+	ret = scoutfs_trans_get_log_trees(sb);
	if (ret)
		goto out;

+	/* start up background services that use everything else */
	scoutfs_inode_start(sb);
+	scoutfs_forest_start(sb);
	scoutfs_trans_restart_sync_deadline(sb);
	// scoutfs_scan_orphans(sb);
	ret = 0;
 out:
	/* on error, generic_shutdown_super calls put_super if s_root */
@@ -646,10 +644,17 @@ static struct dentry *scoutfs_mount(struct file_system_type *fs_type, int flags,
  */
 static void scoutfs_kill_sb(struct super_block *sb)
 {
-	trace_scoutfs_kill_sb(sb);
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

-	if (SCOUTFS_HAS_SBI(sb))
+	if (sbi) {
+		sbi->unmounting = true;
+		smp_wmb();
+	}
+
+	if (SCOUTFS_HAS_SBI(sb)) {
		scoutfs_inode_orphan_stop(sb);
		scoutfs_lock_unmount_begin(sb);
	}

	kill_block_super(sb);
 }
@@ -682,11 +687,15 @@ static int __init scoutfs_module_init(void)
  */
	__asm__ __volatile__ (
		".section .note.git_describe,\"a\"\n"
-		".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
+		".ascii \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
		".previous\n");
	__asm__ __volatile__ (
-		".section .note.scoutfs_interop_version,\"a\"\n"
-		".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
+		".section .note.scoutfs_format_version_min,\"a\"\n"
+		".ascii \""SCOUTFS_FORMAT_VERSION_MIN_STR"\\n\"\n"
		".previous\n");
+	__asm__ __volatile__ (
+		".section .note.scoutfs_format_version_max,\"a\"\n"
+		".ascii \""SCOUTFS_FORMAT_VERSION_MAX_STR"\\n\"\n"
+		".previous\n");

	scoutfs_init_counters();
@@ -720,4 +729,5 @@ module_exit(scoutfs_module_exit)
 MODULE_AUTHOR("Zach Brown <zab@versity.com>");
 MODULE_LICENSE("GPL");
 MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
-MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);
+MODULE_INFO(scoutfs_format_version_min, SCOUTFS_FORMAT_VERSION_MIN_STR);
+MODULE_INFO(scoutfs_format_version_max, SCOUTFS_FORMAT_VERSION_MAX_STR);

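The .string to .ascii switch in the note sections differs by one byte: .string appends a terminating NUL to the section while .ascii emits only the characters given, which matters when tools later concatenate or scan the note payloads as newline-separated text. A compilable illustration (the section names here are examples, not the scoutfs notes):

/* gcc accepts this as-is; inspect with objdump -s */
__asm__ (
	".section .note.example_string,\"a\"\n"
	".string \"v1.1\\n\"\n"		/* emits 'v','1','.','1','\n','\0' */
	".previous\n"
	".section .note.example_ascii,\"a\"\n"
	".ascii \"v1.1\\n\"\n"		/* emits only 'v','1','.','1','\n' */
	".previous\n");

int main(void) { return 0; }
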
@@ -28,13 +28,15 @@ struct forest_info;
 struct srch_info;
 struct recov_info;
 struct omap_info;
+struct volopt_info;
+struct fence_info;

 struct scoutfs_sb_info {
	struct super_block *sb;

	/* assigned once at the start of each mount, read-only */
	u64 rid;
	struct scoutfs_lock *rid_lock;
+	u64 fmt_vers;

	struct scoutfs_super_block super;

@@ -51,22 +53,15 @@ struct scoutfs_sb_info {
	struct forest_info *forest_info;
	struct srch_info *srch_info;
	struct omap_info *omap_info;
+	struct volopt_info *volopt_info;
	struct item_cache_info *item_cache_info;

-	wait_queue_head_t trans_hold_wq;
-	struct task_struct *trans_task;
+	struct fence_info *fence_info;

	/* tracks tasks waiting for data extents */
	struct scoutfs_data_wait_root data_wait_root;

-	spinlock_t trans_write_lock;
-	u64 trans_write_count;
+	/* set as transaction opens with trans holders excluded */
	u64 trans_seq;
-	int trans_write_ret;
-	struct delayed_work trans_write_work;
-	wait_queue_head_t trans_write_wq;
-	struct workqueue_struct *trans_write_workq;
-	bool trans_deadline_expired;

	struct trans_info *trans_info;
	struct lock_info *lock_info;
@@ -85,6 +80,9 @@ struct scoutfs_sb_info {

	struct dentry *debug_root;

+	bool forced_unmount;
+	bool unmounting;
+
	unsigned long corruption_messages_once[SC_NR_LONGS];
 };

@@ -105,6 +103,26 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)

 #define SCOUTFS_META_BDEV_MODE (FMODE_READ | FMODE_WRITE | FMODE_EXCL)

+static inline bool scoutfs_forcing_unmount(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+
+	return sbi->forced_unmount;
+}
+
+/*
+ * True if we're shutting down the system and can be used as a coarse
+ * indicator that we can avoid doing some work that no longer makes
+ * sense.
+ */
+static inline bool scoutfs_unmounting(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+
+	smp_rmb();
+	return !sbi || sbi->unmounting;
+}
+
 /*
  * A small string embedded in messages that's used to identify a
  * specific mount.  It's the three most significant bytes of the fsid
@@ -142,6 +160,4 @@ int scoutfs_write_super(struct super_block *sb,
 /* to keep this out of the ioctl.h public interface definition */
 long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

-__le64 scoutfs_clock_sync_id(void);
-
 #endif

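The unmounting flag is published with smp_wmb() in kill_sb and observed after smp_rmb() in scoutfs_unmounting(). A userspace model of the same pairing using C11 fences; this is an approximation of the kernel barriers, not the kmod code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool unmounting;

static void begin_unmount(void)
{
	atomic_store_explicit(&unmounting, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
}

static bool is_unmounting(void)
{
	atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	return atomic_load_explicit(&unmounting, memory_order_relaxed);
}

int main(void)
{
	begin_unmount();
	printf("%d\n", is_unmounting());
	return 0;
}
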
@@ -37,6 +37,16 @@ struct attr_funcs {

 #define ATTR_FUNCS_RO(_name) \
	static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)

+static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
+				   char *buf)
+{
+	struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", sbi->fmt_vers);
+}
+ATTR_FUNCS_RO(format_version);
+
 static ssize_t fsid_show(struct kobject *kobj, struct attribute *attr,
			 char *buf)
 {
@@ -91,6 +101,7 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,

 static struct attribute *sb_id_attrs[] = {
+	&format_version_attr_funcs.attr,
	&fsid_attr_funcs.attr,
	&rid_attr_funcs.attr,
	NULL,
@@ -131,9 +142,10 @@ void scoutfs_sysfs_init_attrs(struct super_block *sb,
  * If this returns success then the file will be visible and show can
  * be called until unmount.
  */
-int scoutfs_sysfs_create_attrs(struct super_block *sb,
-			       struct scoutfs_sysfs_attrs *ssa,
-			       struct attribute **attrs, char *fmt, ...)
+int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
+				      struct kobject *parent,
+				      struct scoutfs_sysfs_attrs *ssa,
+				      struct attribute **attrs, char *fmt, ...)
 {
	va_list args;
	size_t name_len;
@@ -174,8 +186,8 @@ int scoutfs_sysfs_create_attrs(struct super_block *sb,
		goto out;
	}

-	ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype,
-				   scoutfs_sysfs_sb_dir(sb), "%s", ssa->name);
+	ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype, parent,
+				   "%s", ssa->name);
 out:
	if (ret) {
		kfree(ssa->name);

@@ -10,6 +10,8 @@

 #define SCOUTFS_ATTR_RO(_name) \
	static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RO(_name)
+#define SCOUTFS_ATTR_RW(_name) \
+	static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RW(_name)

 #define SCOUTFS_ATTR_PTR(_name) \
	&scoutfs_attr_##_name.attr
@@ -34,9 +36,14 @@ struct scoutfs_sysfs_attrs {

 void scoutfs_sysfs_init_attrs(struct super_block *sb,
			      struct scoutfs_sysfs_attrs *ssa);
-int scoutfs_sysfs_create_attrs(struct super_block *sb,
-			       struct scoutfs_sysfs_attrs *ssa,
-			       struct attribute **attrs, char *fmt, ...);
+int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
+				      struct kobject *parent,
+				      struct scoutfs_sysfs_attrs *ssa,
+				      struct attribute **attrs, char *fmt, ...);
+#define scoutfs_sysfs_create_attrs(sb, ssa, attrs, fmt, args...) \
+	scoutfs_sysfs_create_attrs_parent(sb, scoutfs_sysfs_sb_dir(sb), \
+					  ssa, attrs, fmt, ##args)

 void scoutfs_sysfs_destroy_attrs(struct super_block *sb,
				 struct scoutfs_sysfs_attrs *ssa);

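The macro above keeps every existing scoutfs_sysfs_create_attrs() caller compiling while routing through the new parent-taking function. A standalone model of this default-argument shim, using the same GNU named variadic macro syntax the diff itself uses:

#include <stdarg.h>
#include <stdio.h>

static int create_attrs_parent(const char *parent, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printf("parent=%s name=", parent);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");
	return 0;
}

/* default-argument shim: forwards everything, fills in the parent */
#define create_attrs(fmt, args...) \
	create_attrs_parent("sb_dir", fmt, ##args)

int main(void)
{
	create_attrs("%s_options", "mount");		/* parent=sb_dir */
	create_attrs_parent("other_dir", "volume_options");
	return 0;
}
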
326 kmod/src/trans.c
@@ -17,6 +17,7 @@
 #include <linux/atomic.h>
 #include <linux/writeback.h>
 #include <linux/slab.h>
+#include <linux/delay.h>

 #include "super.h"
 #include "trans.h"
@@ -53,15 +54,24 @@
 /* sync dirty data at least this often */
 #define TRANS_SYNC_DELAY (HZ * 10)

-/*
- * XXX move the rest of the super trans_ fields here.
- */
 struct trans_info {
+	struct super_block *sb;
+
	atomic_t holders;

	struct scoutfs_log_trees lt;
	struct scoutfs_alloc alloc;
	struct scoutfs_block_writer wri;
+
+	wait_queue_head_t hold_wq;
+	struct task_struct *task;
+	spinlock_t write_lock;
+	u64 write_count;
+	int write_ret;
+	struct delayed_work write_work;
+	wait_queue_head_t write_wq;
+	struct workqueue_struct *write_workq;
+	bool deadline_expired;
 };

 #define DECLARE_TRANS_INFO(sb, name) \
@@ -91,6 +101,7 @@ static int commit_btrees(struct super_block *sb)
  */
 int scoutfs_trans_get_log_trees(struct super_block *sb)
 {
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);
	struct scoutfs_log_trees lt;
	int ret = 0;
@@ -103,6 +114,11 @@ int scoutfs_trans_get_log_trees(struct super_block *sb)

		scoutfs_forest_init_btrees(sb, &tri->alloc, &tri->wri, &lt);
		scoutfs_data_init_btrees(sb, &tri->alloc, &tri->wri, &lt);
+
+		/* first set during mount from 0 to nonzero allows commits */
+		spin_lock(&tri->write_lock);
+		sbi->trans_seq = le64_to_cpu(lt.get_trans_seq);
+		spin_unlock(&tri->write_lock);
	}
	return ret;
 }
@@ -120,13 +136,12 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
  */
 static void sub_holders_and_wake(struct super_block *sb, int val)
 {
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);

	atomic_sub(val, &tri->holders);
	smp_mb(); /* make sure sub is visible before we wake */
-	if (waitqueue_active(&sbi->trans_hold_wq))
-		wake_up(&sbi->trans_hold_wq);
+	if (waitqueue_active(&tri->hold_wq))
+		wake_up(&tri->hold_wq);
 }

 /*
@@ -154,90 +169,93 @@ static bool drained_holders(struct trans_info *tri)
  * functions that would try to hold the transaction.  We record the task
  * whose committing the transaction so that holding won't deadlock.
  *
- * Any dirty block had to have allocated a new blkno which would have
- * created dirty allocator metadata blocks.  We can avoid writing
- * entirely if we don't have any dirty metadata blocks.  This is
- * important because we don't try to serialize this work during
- * unmount.. we can execute as the vfs is shutting down.. we need to
- * decide that nothing is dirty without calling the vfs at all.
+ * Once we clear the write func bit in holders then waiting holders can
+ * enter the transaction and continue modifying the transaction.  Once
+ * we start writing we consider the transaction done and won't exit,
+ * clearing the write func bit, until get_log_trees has opened the next
+ * transaction.  The exception is forced unmount which is allowed to
+ * generate errors and throw away data.
  *
  * We first try to sync the dirty inodes and write their dirty data blocks,
  * then we write all our dirty metadata blocks, and only when those succeed
  * do we write the new super that references all of these newly written blocks.
  *
- * If there are write errors then blocks are kept dirty in memory and will
- * be written again at the next sync.
+ * This means that the only way fsync can return an error is if we're in
+ * forced unmount.
  */
 void scoutfs_trans_write_func(struct work_struct *work)
 {
-	struct scoutfs_sb_info *sbi = container_of(work, struct scoutfs_sb_info,
-						   trans_write_work.work);
-	struct super_block *sb = sbi->sb;
-	DECLARE_TRANS_INFO(sb, tri);
-	u64 trans_seq = sbi->trans_seq;
+	struct trans_info *tri = container_of(work, struct trans_info, write_work.work);
+	struct super_block *sb = tri->sb;
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	bool retrying = false;
	char *s = NULL;
	int ret = 0;

-	sbi->trans_task = current;
+	tri->task = current;

	/* mark that we're writing so holders wait for us to finish and clear our bit */
	atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);

-	wait_event(sbi->trans_hold_wq, drained_holders(tri));
+	wait_event(tri->hold_wq, drained_holders(tri));

-	trace_scoutfs_trans_write_func(sb,
-				       scoutfs_block_writer_dirty_bytes(sb, &tri->wri));
-
-	if (!scoutfs_block_writer_has_dirty(sb, &tri->wri) &&
-	    !scoutfs_item_dirty_pages(sb)) {
-		if (sbi->trans_deadline_expired) {
-			/*
-			 * If we're not writing data then we only advance the
-			 * seq at the sync deadline interval.  This keeps idle
-			 * mounts from pinning a seq and stopping readers of the
-			 * seq indices but doesn't send a message for every sync
-			 * syscall.
-			 */
-			ret = scoutfs_client_advance_seq(sb, &trans_seq);
-			if (ret < 0)
-				s = "clean advance seq";
-		}
+	/* mount hasn't opened first transaction yet, still complete sync */
+	if (sbi->trans_seq == 0) {
+		ret = 0;
+		goto out;
+	}

-	if (sbi->trans_deadline_expired)
+	if (scoutfs_forcing_unmount(sb)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	trace_scoutfs_trans_write_func(sb, scoutfs_block_writer_dirty_bytes(sb, &tri->wri),
+				       scoutfs_item_dirty_pages(sb));
+
+	if (tri->deadline_expired)
		scoutfs_inc_counter(sb, trans_commit_timer);

	scoutfs_inc_counter(sb, trans_commit_written);

-	/* XXX this all needs serious work for dealing with errors */
-	ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
-	      (s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
-	      (s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
-	      (s = "alloc prepare", scoutfs_alloc_prepare_commit(sb,
-						&tri->alloc, &tri->wri)) ?:
-	      (s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
-	      (s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
-	      (s = "commit log trees", commit_btrees(sb)) ?:
-	      scoutfs_item_write_done(sb) ?:
-	      (s = "advance seq", scoutfs_client_advance_seq(sb, &trans_seq)) ?:
-	      (s = "get log trees", scoutfs_trans_get_log_trees(sb));
-out:
-	if (ret < 0)
-		scoutfs_err(sb, "critical transaction commit failure: %s, %d",
-			    s, ret);
+	do {
+		ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
+		      (s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
+		      (s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
+		      (s = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc,
+									 &tri->wri)) ?:
+		      (s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
+		      (s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
+		      (s = "commit log trees", commit_btrees(sb)) ?:
+		      scoutfs_item_write_done(sb) ?:
+		      (s = "get log trees", scoutfs_trans_get_log_trees(sb));
+		if (ret < 0) {
+			if (!retrying) {
+				scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
+					     s, ret);
+				retrying = true;
+			}

-	spin_lock(&sbi->trans_write_lock);
-	sbi->trans_write_count++;
-	sbi->trans_write_ret = ret;
-	sbi->trans_seq = trans_seq;
-	spin_unlock(&sbi->trans_write_lock);
-	wake_up(&sbi->trans_write_wq);
+			if (scoutfs_forcing_unmount(sb)) {
+				ret = -EIO;
+				break;
+			}
+
+			msleep(2 * MSEC_PER_SEC);
+
+		} else if (retrying) {
+			scoutfs_info(sb, "retried transaction commit succeeded");
+		}
+
+	} while (ret < 0);
+
+out:
+	spin_lock(&tri->write_lock);
+	tri->write_count++;
+	tri->write_ret = ret;
+	spin_unlock(&tri->write_lock);
+	wake_up(&tri->write_wq);

	/* we're done, wake waiting holders */
	sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);

-	sbi->trans_task = NULL;
+	tri->task = NULL;

	scoutfs_trans_restart_sync_deadline(sb);
 }
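The commit chain relies on two GNU C extensions: the comma operator inside (s = "step", call()) records the step name just before each call, and ?: with an empty middle operand stops the chain at the first nonzero errno-style return, so the error message can name the failing step. A minimal standalone model:

#include <stdio.h>

static int step_ok(void)   { return 0; }
static int step_fail(void) { return -5; /* -EIO */ }

int main(void)
{
	const char *s = NULL;
	int ret;

	/* GNU extensions: comma operator tags each step, ?: short-circuits */
	ret = (s = "data submit", step_ok()) ?:
	      (s = "meta write", step_fail()) ?:
	      (s = "commit log trees", step_ok());
	if (ret < 0)
		printf("commit failure: %s = %d\n", s, ret);
	return 0;
}
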
@@ -248,17 +266,17 @@ struct write_attempt {
 };

 /* this is called as a wait_event() condition so it can't change task state */
-static int write_attempted(struct scoutfs_sb_info *sbi,
-			   struct write_attempt *attempt)
+static int write_attempted(struct super_block *sb, struct write_attempt *attempt)
 {
+	DECLARE_TRANS_INFO(sb, tri);
	int done = 1;

-	spin_lock(&sbi->trans_write_lock);
-	if (sbi->trans_write_count > attempt->count)
-		attempt->ret = sbi->trans_write_ret;
+	spin_lock(&tri->write_lock);
+	if (tri->write_count > attempt->count)
+		attempt->ret = tri->write_ret;
	else
		done = 0;
-	spin_unlock(&sbi->trans_write_lock);
+	spin_unlock(&tri->write_lock);

	return done;
 }
@@ -268,10 +286,12 @@ static int write_attempted(struct scoutfs_sb_info *sbi,
  * We always have delayed sync work pending but the caller wants it
  * to execute immediately.
  */
-static void queue_trans_work(struct scoutfs_sb_info *sbi)
+static void queue_trans_work(struct super_block *sb)
 {
-	sbi->trans_deadline_expired = false;
-	mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work, 0);
+	DECLARE_TRANS_INFO(sb, tri);
+
+	tri->deadline_expired = false;
+	mod_delayed_work(tri->write_workq, &tri->write_work, 0);
 }

 /*
@@ -284,26 +304,24 @@ static void queue_trans_work(struct scoutfs_sb_info *sbi)
  */
 int scoutfs_trans_sync(struct super_block *sb, int wait)
 {
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
-	struct write_attempt attempt;
+	DECLARE_TRANS_INFO(sb, tri);
+	struct write_attempt attempt = { .ret = 0 };
	int ret;

	if (!wait) {
-		queue_trans_work(sbi);
+		queue_trans_work(sb);
		return 0;
	}

-	spin_lock(&sbi->trans_write_lock);
-	attempt.count = sbi->trans_write_count;
-	spin_unlock(&sbi->trans_write_lock);
+	spin_lock(&tri->write_lock);
+	attempt.count = tri->write_count;
+	spin_unlock(&tri->write_lock);

-	queue_trans_work(sbi);
+	queue_trans_work(sb);

-	ret = wait_event_interruptible(sbi->trans_write_wq,
-				       write_attempted(sbi, &attempt));
-	if (ret == 0)
-		ret = attempt.ret;
+	wait_event(tri->write_wq, write_attempted(sb, &attempt));
+	ret = attempt.ret;

	return ret;
 }
@@ -319,10 +337,10 @@ int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,

 void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
 {
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	DECLARE_TRANS_INFO(sb, tri);

-	sbi->trans_deadline_expired = true;
-	mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work,
+	tri->deadline_expired = true;
+	mod_delayed_work(tri->write_workq, &tri->write_work,
			 TRANS_SYNC_DELAY);
 }

@@ -430,8 +448,8 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
		return true;
	}

-	/* Try to refill data allocator before premature enospc */
-	if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
+	/* if we're low and can't refill then alloc could empty and return enospc */
+	if (scoutfs_data_alloc_should_refill(sb, SCOUTFS_ALLOC_DATA_REFILL_THRESH)) {
		scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
		return true;
	}
@@ -439,38 +457,15 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
	return false;
 }

-static bool acquired_hold(struct super_block *sb)
+/*
+ * called as a wait_event condition, needs to be careful to not change
+ * task state and is racing with waking paths that sub_return, test, and
+ * wake.
+ */
+static bool holders_no_writer(struct trans_info *tri)
 {
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
-	DECLARE_TRANS_INFO(sb, tri);
-	bool acquired;
-
-	/* if a caller already has a hold we acquire unconditionally */
-	if (inc_journal_info_holders()) {
-		atomic_inc(&tri->holders);
-		acquired = true;
-		goto out;
-	}
-
-	/* wait if the writer is blocking holds */
-	if (!inc_holders_unless_writer(tri)) {
-		dec_journal_info_holders();
-		acquired = false;
-		goto out;
-	}
-
-	/* wait if we're triggering another commit */
-	if (commit_before_hold(sb, tri)) {
-		release_holders(sb);
-		queue_trans_work(sbi);
-		acquired = false;
-		goto out;
-	}
-
-	trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
-	acquired = true;
-out:
-	return acquired;
+	smp_mb(); /* make sure task in wait_event queue before atomic read */
+	return !(atomic_read(&tri->holders) & TRANS_HOLDERS_WRITE_FUNC_BIT);
 }

 /*
@@ -486,15 +481,65 @@ out:
  * The writing thread marks itself as a global trans_task which
  * short-circuits all the hold machinery so it can call code that would
  * otherwise try to hold transactions while it is writing.
+ *
+ * If the caller is adding metadata items that will eventually consume
+ * free space -- not dirtying existing items or adding deletion items --
+ * then we can return enospc if our metadata allocator indicates that
+ * we're low on space.
  */
-int scoutfs_hold_trans(struct super_block *sb)
+int scoutfs_hold_trans(struct super_block *sb, bool allocing)
 {
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);
+	u64 seq;
+	int ret;

-	if (current == sbi->trans_task)
+	if (current == tri->task)
		return 0;

-	return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
+	for (;;) {
+		/* shouldn't get holders until mount finishes, (not locking for cheap test) */
+		if (WARN_ON_ONCE(sbi->trans_seq == 0)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		/* if a caller already has a hold we acquire unconditionally */
+		if (inc_journal_info_holders()) {
+			atomic_inc(&tri->holders);
+			ret = 0;
+			break;
+		}
+
+		/* wait until the writer work is finished */
+		if (!inc_holders_unless_writer(tri)) {
+			dec_journal_info_holders();
+			wait_event(tri->hold_wq, holders_no_writer(tri));
+			continue;
+		}
+
+		/* return enospc if server is into reserved blocks and we're allocating */
+		if (allocing && scoutfs_alloc_test_flag(sb, &tri->alloc, SCOUTFS_ALLOC_FLAG_LOW)) {
+			release_holders(sb);
+			ret = -ENOSPC;
+			break;
+		}
+
+		/* see if we need to trigger and wait for a commit before holding */
+		if (commit_before_hold(sb, tri)) {
+			seq = scoutfs_trans_sample_seq(sb);
+			release_holders(sb);
+			queue_trans_work(sb);
+			wait_event(tri->hold_wq, scoutfs_trans_sample_seq(sb) != seq);
+			continue;
+		}
+
+		ret = 0;
+		break;
+	}
+
+	trace_scoutfs_hold_trans(sb, current->journal_info, atomic_read(&tri->holders), ret);
+	return ret;
 }
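The holders atomic mixes a count with TRANS_HOLDERS_WRITE_FUNC_BIT so one word says both how many holders exist and whether the writer is draining them. A userspace sketch of the pattern; the actual bit value is an assumption here, not taken from the source:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WRITE_FUNC_BIT (1 << 30)	/* assumed sentinel bit */

static _Atomic int holders;

static bool try_hold(void)
{
	int cur = atomic_load(&holders);

	/* refuse while the writer's bit is set, else count ourselves in */
	while (!(cur & WRITE_FUNC_BIT))
		if (atomic_compare_exchange_weak(&holders, &cur, cur + 1))
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", try_hold());			/* 1: acquired */
	atomic_fetch_add(&holders, WRITE_FUNC_BIT);	/* writer drains */
	printf("%d\n", try_hold());			/* 0: blocked */
	return 0;
}
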
 /*
@@ -511,15 +556,14 @@ bool scoutfs_trans_held(void)

 void scoutfs_release_trans(struct super_block *sb)
 {
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_TRANS_INFO(sb, tri);

-	if (current == sbi->trans_task)
+	if (current == tri->task)
		return;

	release_holders(sb);

-	trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
+	trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders), 0);
 }

 /*
@@ -529,12 +573,13 @@ void scoutfs_release_trans(struct super_block *sb)
  */
 u64 scoutfs_trans_sample_seq(struct super_block *sb)
 {
+	DECLARE_TRANS_INFO(sb, tri);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	u64 ret;

-	spin_lock(&sbi->trans_write_lock);
+	spin_lock(&tri->write_lock);
	ret = sbi->trans_seq;
-	spin_unlock(&sbi->trans_write_lock);
+	spin_unlock(&tri->write_lock);

	return ret;
 }
@@ -548,12 +593,17 @@ int scoutfs_setup_trans(struct super_block *sb)
	if (!tri)
		return -ENOMEM;

+	tri->sb = sb;
	atomic_set(&tri->holders, 0);
	scoutfs_block_writer_init(sb, &tri->wri);

-	sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
-						 WQ_UNBOUND, 1);
-	if (!sbi->trans_write_workq) {
+	spin_lock_init(&tri->write_lock);
+	INIT_DELAYED_WORK(&tri->write_work, scoutfs_trans_write_func);
+	init_waitqueue_head(&tri->write_wq);
+	init_waitqueue_head(&tri->hold_wq);
+
+	tri->write_workq = alloc_workqueue("scoutfs_trans", WQ_UNBOUND, 1);
+	if (!tri->write_workq) {
		kfree(tri);
		return -ENOMEM;
	}
@@ -580,14 +630,14 @@ void scoutfs_shutdown_trans(struct super_block *sb)
	DECLARE_TRANS_INFO(sb, tri);

	if (tri) {
-		if (sbi->trans_write_workq) {
+		if (tri->write_workq) {
			/* immediately queues pending timer */
-			flush_delayed_work(&sbi->trans_write_work);
+			flush_delayed_work(&tri->write_work);
			/* prevents re-arming if it has to wait */
-			cancel_delayed_work_sync(&sbi->trans_write_work);
-			destroy_workqueue(sbi->trans_write_workq);
+			cancel_delayed_work_sync(&tri->write_work);
+			destroy_workqueue(tri->write_workq);
			/* trans work schedules after shutdown see null */
-			sbi->trans_write_workq = NULL;
+			tri->write_workq = NULL;
		}

		scoutfs_block_writer_forget_all(sb, &tri->wri);

@@ -1,18 +1,13 @@
 #ifndef _SCOUTFS_TRANS_H_
 #define _SCOUTFS_TRANS_H_

-/* the server will attempt to fill data allocs for each trans */
-#define SCOUTFS_TRANS_DATA_ALLOC_HWM (2ULL * 1024 * 1024 * 1024)
-/* the client will force commits if data allocators get too low */
-#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)
-
 void scoutfs_trans_write_func(struct work_struct *work);
 int scoutfs_trans_sync(struct super_block *sb, int wait);
 int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync);
 void scoutfs_trans_restart_sync_deadline(struct super_block *sb);

-int scoutfs_hold_trans(struct super_block *sb);
+int scoutfs_hold_trans(struct super_block *sb, bool allocing);
 bool scoutfs_trans_held(void);
 void scoutfs_release_trans(struct super_block *sb);
 u64 scoutfs_trans_sample_seq(struct super_block *sb);

188 kmod/src/volopt.c (Normal file)
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2021 Versity Software, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "super.h"
+#include "client.h"
+#include "volopt.h"
+
+/*
+ * Volume options are exposed through a sysfs directory.  Getting and
+ * setting the values sends rpcs to the server who owns the options in
+ * the super block.
+ */
+
+struct volopt_info {
+	struct super_block *sb;
+	struct scoutfs_sysfs_attrs ssa;
+};
+
+#define DECLARE_VOLOPT_INFO(sb, name) \
+	struct volopt_info *name = SCOUTFS_SB(sb)->volopt_info
+#define DECLARE_VOLOPT_INFO_KOBJ(kobj, name) \
+	DECLARE_VOLOPT_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
+
+/*
+ * attribute arrays need to be dense but the options we export could
+ * well become sparse over time.  .store and .load are generic and we
+ * have a lookup table to map the attributes array indexes to the number
+ * and name of the option.
+ */
+static struct volopt_nr_name {
+	int nr;
+	char *name;
+} volopt_table[] = {
+	{ SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR, "data_alloc_zone_blocks" },
+};
+
+/* initialized by setup, pointer array is null terminated */
+static struct kobj_attribute volopt_attrs[ARRAY_SIZE(volopt_table)];
+static struct attribute *volopt_attr_ptrs[ARRAY_SIZE(volopt_table) + 1];
+
+static void get_opt_data(struct kobj_attribute *attr, struct scoutfs_volume_options *volopt,
+			 u64 *bit, __le64 **opt)
+{
+	size_t index = attr - &volopt_attrs[0];
+	int nr = volopt_table[index].nr;
+
+	*bit = 1ULL << nr;
+	*opt = &volopt->set_bits + 1 + nr;
+}
+
+static ssize_t volopt_attr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
+	struct super_block *sb = vinf->sb;
+	struct scoutfs_volume_options volopt;
+	__le64 *opt;
+	u64 bit;
+	int ret;
+
+	ret = scoutfs_client_get_volopt(sb, &volopt);
+	if (ret < 0)
+		return ret;
+
+	get_opt_data(attr, &volopt, &bit, &opt);
+
+	if (le64_to_cpu(volopt.set_bits) & bit) {
+		return snprintf(buf, PAGE_SIZE, "%llu", le64_to_cpup(opt));
+	} else {
+		buf[0] = '\0';
+		return 0;
+	}
+}
+
+static ssize_t volopt_attr_store(struct kobject *kobj, struct kobj_attribute *attr,
+				 const char *buf, size_t count)
+{
+	DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
+	struct super_block *sb = vinf->sb;
+	struct scoutfs_volume_options volopt = {0,};
+	u8 chars[32];
+	__le64 *opt;
+	u64 bit;
+	u64 val;
+	int ret;
+
+	if (count == 0)
+		return 0;
+	if (count > sizeof(chars) - 1)
+		return -ERANGE;
+
+	get_opt_data(attr, &volopt, &bit, &opt);
+
+	if (buf[0] == '\n' || buf[0] == '\r') {
+		volopt.set_bits = cpu_to_le64(bit);
+
+		ret = scoutfs_client_clear_volopt(sb, &volopt);
+	} else {
+		memcpy(chars, buf, count);
+		chars[count] = '\0';
+		ret = kstrtoull(chars, 0, &val);
+		if (ret < 0)
+			return ret;
+
+		volopt.set_bits = cpu_to_le64(bit);
+		*opt = cpu_to_le64(val);
+
+		ret = scoutfs_client_set_volopt(sb, &volopt);
+	}
+
+	if (ret == 0)
+		ret = count;
+	return ret;
+}
+
+/*
+ * The volume option sysfs files are slim shims around RPCs so this
+ * should be called after the client is setup and before it is torn
+ * down.
+ */
+int scoutfs_volopt_setup(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	struct volopt_info *vinf;
+	int ret;
+	int i;
+
+	/* persistent volume options are always a bitmap u64 then the 64 options */
+	BUILD_BUG_ON(sizeof(struct scoutfs_volume_options) != (1 + 64) * 8);
+
+	vinf = kzalloc(sizeof(struct volopt_info), GFP_KERNEL);
+	if (!vinf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	scoutfs_sysfs_init_attrs(sb, &vinf->ssa);
+	vinf->sb = sb;
+	sbi->volopt_info = vinf;
+
+	for (i = 0; i < ARRAY_SIZE(volopt_table); i++) {
+		volopt_attrs[i] = (struct kobj_attribute) {
+			.attr = { .name = volopt_table[i].name, .mode = S_IWUSR | S_IRUGO },
+			.show = volopt_attr_show,
+			.store = volopt_attr_store,
+		};
+		volopt_attr_ptrs[i] = &volopt_attrs[i].attr;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(volopt_table) != ARRAY_SIZE(volopt_attr_ptrs) - 1);
+	volopt_attr_ptrs[i] = NULL;
+
+	ret = scoutfs_sysfs_create_attrs(sb, &vinf->ssa, volopt_attr_ptrs, "volume_options");
+	if (ret < 0)
+		goto out;
+
+out:
+	if (ret)
+		scoutfs_volopt_destroy(sb);
+
+	return ret;
+}
+
+void scoutfs_volopt_destroy(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	struct volopt_info *vinf = SCOUTFS_SB(sb)->volopt_info;
+
+	if (vinf) {
+		scoutfs_sysfs_destroy_attrs(sb, &vinf->ssa);
+		kfree(vinf);
+		sbi->volopt_info = NULL;
+	}
+}
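get_opt_data()'s pointer math leans on the persistent layout that the BUILD_BUG_ON pins down: one set_bits u64 followed by 64 option slots, so option nr lives at (&set_bits + 1 + nr). A standalone model of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

struct volume_options {
	uint64_t set_bits;
	uint64_t opts[64];
};

int main(void)
{
	struct volume_options volopt = { 0 };
	int nr = 0;				/* data_alloc_zone_blocks */
	uint64_t *opt = &volopt.set_bits + 1 + nr;

	volopt.set_bits |= 1ULL << nr;
	*opt = 4096;

	printf("sizeof %zu opt[0] %llu\n", sizeof(volopt),
	       (unsigned long long)volopt.opts[0]);	/* 520, 4096 */
	return 0;
}
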
7 kmod/src/volopt.h (Normal file)
@@ -0,0 +1,7 @@
+#ifndef _SCOUTFS_VOLOPT_H_
+#define _SCOUTFS_VOLOPT_H_
+
+int scoutfs_volopt_setup(struct super_block *sb);
+void scoutfs_volopt_destroy(struct super_block *sb);
+
+#endif
211 kmod/src/xattr.c
@@ -97,6 +97,7 @@ static int unknown_prefix(const char *name)

 #define HIDE_TAG "hide."
 #define SRCH_TAG "srch."
+#define TOTL_TAG "totl."
 #define TAG_LEN (sizeof(HIDE_TAG) - 1)

 int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
@@ -119,6 +120,9 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
	} else if (!strncmp(name, SRCH_TAG, TAG_LEN)) {
		if (++tgs->srch == 0)
			return -EINVAL;
+	} else if (!strncmp(name, TOTL_TAG, TAG_LEN)) {
+		if (++tgs->totl == 0)
+			return -EINVAL;
	} else {
		/* only reason to use scoutfs. is tags */
		if (!found)
@@ -364,7 +368,7 @@ static int change_xattr_items(struct inode *inode, u64 id,
	}

	/* update dirtied overlapping existing items, last partial first */
-	for (i = old_parts - 1; i >= 0; i--) {
+	for (i = min(old_parts, new_parts) - 1; i >= 0; i--) {
		off = i * SCOUTFS_XATTR_MAX_PART_SIZE;
		bytes = min_t(unsigned int, new_bytes - off,
			      SCOUTFS_XATTR_MAX_PART_SIZE);
@@ -468,6 +472,100 @@ out:
	return ret;
 }

+void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name)
+{
+	scoutfs_key_set_zeros(key);
+	key->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
+	key->skxt_a = cpu_to_le64(name[0]);
+	key->skxt_b = cpu_to_le64(name[1]);
+	key->skxt_c = cpu_to_le64(name[2]);
+}
+
+/*
+ * Parse a u64 in any base after null terminating it while forbidding
+ * the leading + and trailing \n that kstrotull allows.
+ */
+static int parse_totl_u64(const char *s, int len, u64 *res)
+{
+	char str[SCOUTFS_XATTR_MAX_TOTL_U64 + 1];
+
+	if (len <= 0 || len >= ARRAY_SIZE(str) || s[0] == '+' || s[len - 1] == '\n')
+		return -EINVAL;
+
+	memcpy(str, s, len);
+	str[len] = '\0';
+
+	return kstrtoull(str, 0, res) != 0 ? -EINVAL : 0;
+}
+
+/*
+ * non-destructive relatively quick parse of the last 3 dotted u64s that
+ * make up the name of the xattr total.  -EINVAL is returned if there
+ * are anything but 3 valid u64 encodings between single dots at the end
+ * of the name.
+ */
+static int parse_totl_key(struct scoutfs_key *key, const char *name, int name_len)
+{
+	u64 tot_name[3];
+	int end = name_len;
+	int nr = 0;
+	int len;
+	int ret;
+	int i;
+
+	/* parse name elements in reserve order from end of xattr name string */
+	for (i = name_len - 1; i >= 0 && nr < ARRAY_SIZE(tot_name); i--) {
+		if (name[i] != '.')
+			continue;
+
+		len = end - (i + 1);
+		ret = parse_totl_u64(&name[i + 1], len, &tot_name[nr]);
+		if (ret < 0)
+			goto out;
+
+		end = i;
+		nr++;
+	}
+
+	if (nr == ARRAY_SIZE(tot_name)) {
+		/* swap to account for parsing in reverse */
+		swap(tot_name[0], tot_name[2]);
+		scoutfs_xattr_init_totl_key(key, tot_name);
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+
+out:
+	return ret;
+}
+
+static int apply_totl_delta(struct super_block *sb, struct scoutfs_key *key,
+			    struct scoutfs_xattr_totl_val *tval, struct scoutfs_lock *lock)
+{
+	if (tval->total == 0 && tval->count == 0)
+		return 0;
+
+	return scoutfs_item_delta(sb, key, tval, sizeof(*tval), lock);
+}
+
+int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len)
+{
+	struct scoutfs_xattr_totl_val *s_tval = src;
+	struct scoutfs_xattr_totl_val *d_tval = dst;
+
+	if (src_len != sizeof(*s_tval) || dst_len != src_len)
+		return -EIO;
+
+	le64_add_cpu(&d_tval->total, le64_to_cpu(s_tval->total));
+	le64_add_cpu(&d_tval->count, le64_to_cpu(s_tval->count));
+
+	if (d_tval->total == 0 && d_tval->count == 0)
+		return SCOUTFS_DELTA_COMBINED_NULL;
+
+	return SCOUTFS_DELTA_COMBINED;
+}
+
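parse_totl_key() walks the name backwards so only the trailing three dotted u64s matter, whatever prefix precedes them. A simplified userspace version of the same reverse parse, using strtoull in place of the kernel's kstrtoull:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_totl_name(const char *name, unsigned long long out[3])
{
	int end = strlen(name);
	int nr = 0;

	/* collect the last three dotted u64s, right to left */
	for (int i = end - 1; i >= 0 && nr < 3; i--) {
		if (name[i] != '.')
			continue;
		char *stop;
		out[nr++] = strtoull(&name[i + 1], &stop, 0);
		if (stop != name + end)
			return -1;	/* junk between dots */
		end = i;
	}
	if (nr < 3)
		return -1;
	/* swap to account for parsing in reverse */
	unsigned long long t = out[0]; out[0] = out[2]; out[2] = t;
	return 0;
}

int main(void)
{
	unsigned long long n[3];

	if (parse_totl_name("scoutfs.totl.1.2.3", n) == 0)
		printf("%llu %llu %llu\n", n[0], n[1], n[2]);	/* 1 2 3 */
	return 0;
}
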
/*
 * The confusing swiss army knife of creating, modifying, and deleting
 * xattrs.
@@ -486,16 +584,22 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct super_block *sb = inode->i_sb;
	const u64 ino = scoutfs_ino(inode);
	struct scoutfs_xattr_totl_val tval = {0,};
	struct scoutfs_xattr_prefix_tags tgs;
	struct scoutfs_xattr *xat = NULL;
	struct scoutfs_lock *lck = NULL;
	struct scoutfs_lock *totl_lock = NULL;
	size_t name_len = strlen(name);
	struct scoutfs_key totl_key;
	struct scoutfs_key key;
	bool undo_srch = false;
	bool undo_totl = false;
	LIST_HEAD(ind_locks);
	u8 found_parts;
	unsigned int bytes;
	unsigned int val_len;
	u64 ind_seq;
	u64 total;
	u64 hash = 0;
	u64 id = 0;
	int ret;
@@ -519,11 +623,15 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
	if (scoutfs_xattr_parse_tags(name, name_len, &tgs) != 0)
		return -EINVAL;

	if ((tgs.hide || tgs.srch) && !capable(CAP_SYS_ADMIN))
	if ((tgs.hide | tgs.srch | tgs.totl) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (tgs.totl && ((ret = parse_totl_key(&totl_key, name, name_len)) != 0))
		return ret;

	bytes = sizeof(struct scoutfs_xattr) + name_len + size;
	xat = __vmalloc(bytes, GFP_NOFS, PAGE_KERNEL);
	/* alloc enough to read old totl value */
	xat = __vmalloc(bytes + SCOUTFS_XATTR_MAX_TOTL_U64, GFP_NOFS, PAGE_KERNEL);
	if (!xat) {
		ret = -ENOMEM;
		goto out;
@@ -536,9 +644,9 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,

	down_write(&si->xattr_rwsem);

	/* find an existing xattr to delete */
	/* find an existing xattr to delete, including possible totl value */
	ret = get_next_xattr(inode, &key, xat,
			     sizeof(struct scoutfs_xattr) + name_len,
			     sizeof(struct scoutfs_xattr) + name_len + SCOUTFS_XATTR_MAX_TOTL_U64,
			     name, name_len, 0, 0, lck);
	if (ret < 0 && ret != -ENOENT)
		goto unlock;
@@ -558,9 +666,23 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
		goto unlock;
	}

	/* s64 count delta if we create or delete */
	if (tgs.totl)
		tval.count = cpu_to_le64((u64)!!(value) - (u64)!!(ret != -ENOENT));

	/* found fields in key will also be used */
	found_parts = ret >= 0 ? xattr_nr_parts(xat) : 0;

	if (found_parts && tgs.totl) {
		/* parse old totl value before we clobber xat buf */
		val_len = ret - offsetof(struct scoutfs_xattr, name[xat->name_len]);
		ret = parse_totl_u64(&xat->name[xat->name_len], val_len, &total);
		if (ret < 0)
			goto unlock;

		le64_add_cpu(&tval.total, -total);
	}

	/* prepare our xattr */
	if (value) {
		if (found_parts)
@@ -572,12 +694,26 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
		memset(xat->__pad, 0, sizeof(xat->__pad));
		memcpy(xat->name, name, name_len);
		memcpy(&xat->name[xat->name_len], value, size);

		if (tgs.totl) {
			ret = parse_totl_u64(value, size, &total);
			if (ret < 0)
				goto unlock;
		}

		le64_add_cpu(&tval.total, total);
	}

	if (tgs.totl) {
		ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &totl_lock);
		if (ret)
			goto unlock;
	}

retry:
	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
	      scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq, true);
	if (ret > 0)
		goto retry;
	if (ret)
@@ -597,6 +733,13 @@ retry:
		undo_srch = true;
	}

	if (tgs.totl) {
		ret = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
		if (ret < 0)
			goto release;
		undo_totl = true;
	}

	if (found_parts && value)
		ret = change_xattr_items(inode, id, xat, bytes,
					 xattr_nr_parts(xat), found_parts, lck);
@@ -620,12 +763,20 @@ release:
		err = scoutfs_forest_srch_add(sb, hash, ino, id);
		BUG_ON(err);
	}
	if (ret < 0 && undo_totl) {
		/* _delta() on dirty items shouldn't fail */
		tval.total = cpu_to_le64(-le64_to_cpu(tval.total));
		tval.count = cpu_to_le64(-le64_to_cpu(tval.count));
		err = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
		BUG_ON(err);
	}

	scoutfs_release_trans(sb);
	scoutfs_inode_index_unlock(sb, &ind_locks);
unlock:
	up_write(&si->xattr_rwsem);
	scoutfs_unlock(sb, lck, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
out:
	vfree(xat);
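The count delta expression in the hunk above packs the create/update/delete cases into one subtraction. The following standalone sketch, with hypothetical helper names, just enumerates those cases: new_value stands for "a non-NULL value was passed in" and found stands for "get_next_xattr() did not return -ENOENT".

#include <stdio.h>

/* mirrors (u64)!!(value) - (u64)!!(ret != -ENOENT) from the hunk above */
static long long count_delta(int new_value, int found)
{
	return (long long)!!new_value - (long long)!!found;
}

int main(void)
{
	/* create: +1, update: 0, delete existing: -1, delete missing: 0 */
	printf("create %lld\n", count_delta(1, 0));
	printf("update %lld\n", count_delta(1, 1));
	printf("delete %lld\n", count_delta(0, 1));
	printf("noop   %lld\n", count_delta(0, 0));
	return 0;
}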
@@ -746,15 +897,22 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
{
	struct scoutfs_xattr_prefix_tags tgs;
	struct scoutfs_xattr *xat = NULL;
	struct scoutfs_lock *totl_lock = NULL;
	struct scoutfs_xattr_totl_val tval;
	struct scoutfs_key totl_key;
	struct scoutfs_key last;
	struct scoutfs_key key;
	bool release = false;
	unsigned int bytes;
	unsigned int val_len;
	void *value;
	u64 total;
	u64 hash;
	int ret;

	/* need a buffer large enough for all possible names */
	bytes = sizeof(struct scoutfs_xattr) + SCOUTFS_XATTR_MAX_NAME_LEN;
	/* need a buffer large enough for all possible names and totl value */
	bytes = sizeof(struct scoutfs_xattr) + SCOUTFS_XATTR_MAX_NAME_LEN +
		SCOUTFS_XATTR_MAX_TOTL_U64;
	xat = kmalloc(bytes, GFP_NOFS);
	if (!xat) {
		ret = -ENOMEM;
@@ -773,12 +931,38 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
			break;
		}

		if (key.skx_part == 0 && (ret < sizeof(struct scoutfs_xattr) ||
		    ret < offsetof(struct scoutfs_xattr, name[xat->name_len]))) {
			ret = -EIO;
			break;
		}

		if (key.skx_part != 0 ||
		    scoutfs_xattr_parse_tags(xat->name, xat->name_len,
					     &tgs) != 0)
			memset(&tgs, 0, sizeof(tgs));

		ret = scoutfs_hold_trans(sb);
		if (tgs.totl) {
			value = &xat->name[xat->name_len];
			val_len = ret - offsetof(struct scoutfs_xattr, name[xat->name_len]);
			if (val_len != le16_to_cpu(xat->val_len)) {
				ret = -EIO;
				goto out;
			}

			ret = parse_totl_key(&totl_key, xat->name, xat->name_len) ?:
			      parse_totl_u64(value, val_len, &total);
			if (ret < 0)
				break;
		}

		if (tgs.totl && totl_lock == NULL) {
			ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &totl_lock);
			if (ret < 0)
				break;
		}

		ret = scoutfs_hold_trans(sb, false);
		if (ret < 0)
			break;
		release = true;
@@ -795,6 +979,14 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
			break;
		}

		if (tgs.totl) {
			tval.total = cpu_to_le64(-total);
			tval.count = cpu_to_le64(-1LL);
			ret = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
			if (ret < 0)
				break;
		}

		scoutfs_release_trans(sb);
		release = false;

@@ -803,6 +995,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,

	if (release)
		scoutfs_release_trans(sb);
	scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
	kfree(xat);
out:
	return ret;
@@ -16,10 +16,14 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,

struct scoutfs_xattr_prefix_tags {
	unsigned long hide:1,
		      srch:1;
		      srch:1,
		      totl:1;
};

int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
			     struct scoutfs_xattr_prefix_tags *tgs);

void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name);
int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len);

#endif
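The totl tests later in this change use names like scoutfs.totl.test.1.2.3, and the fragment at the top of this diff swaps tot_name[0] and tot_name[2] before calling scoutfs_xattr_init_totl_key(). The following is only an illustrative userspace sketch of that dotted-u64 form and the swap; parse_totl_key() itself is not shown in this diff and may well differ in detail.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

/* parse a strict "A.B.C" triple of decimal u64s, rejecting empty
 * fields, signs, and trailing junk */
static int parse_dotted_u64s(const char *dotted, uint64_t name[3])
{
	char *end;
	int i;

	for (i = 0; i < 3; i++) {
		if (*dotted < '0' || *dotted > '9')
			return -1;	/* empty field or leading junk */
		name[i] = strtoull(dotted, &end, 10);
		if (i < 2 ? *end != '.' : *end != '\0')
			return -1;	/* wrong separator or trailing junk */
		dotted = end + 1;
	}
	return 0;
}

int main(void)
{
	uint64_t name[3], tmp;

	if (parse_dotted_u64s("1.2.3", name))
		return 1;

	/* mirror the swap(tot_name[0], tot_name[2]) seen in the diff */
	tmp = name[0];
	name[0] = name[2];
	name[2] = tmp;

	printf("key u64s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
	       name[0], name[1], name[2]);
	return 0;
}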
3 tests/.gitignore vendored
@@ -1,6 +1,9 @@
src/*.d
src/createmany
src/dumb_renameat2
src/dumb_setxattr
src/handle_cat
src/bulk_create_paths
src/find_xattrs
src/stage_tmpfile
src/create_xattr_loop
@@ -3,11 +3,13 @@ SHELL := /usr/bin/bash

# each binary command is built from a single .c file
BIN := src/createmany \
       src/dumb_renameat2 \
       src/dumb_setxattr \
       src/handle_cat \
       src/bulk_create_paths \
       src/stage_tmpfile \
       src/find_xattrs
       src/find_xattrs \
       src/create_xattr_loop

DEPS := $(wildcard src/*.d)
35 tests/fenced-local-force-unmount.sh Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/bash

echo_fail() {
	echo "$@" > /dev/stderr
	exit 1
}

rid="$SCOUTFS_FENCED_REQ_RID"

#
# Look for a local mount with the rid to fence. Typically we'll at
# least find the mount with the server that requested the fence that
# we're processing. But it's possible that mounts are unmounted
# before, or while, we're running.
#
mnts=$(findmnt -l -n -t scoutfs -o TARGET) || \
	echo_fail "findmnt -t scoutfs failed" > /dev/stderr

for mnt in $mnts; do
	mnt_rid=$(scoutfs statfs -p "$mnt" -s rid) || \
		echo_fail "scoutfs statfs $mnt failed"

	if [ "$mnt_rid" == "$rid" ]; then
		umount -f "$mnt" || \
			echo_fail "umount -f $mnt"

		exit 0
	fi
done

#
# If the mount doesn't exist on this host then it can't access the
# devices by definition and can be considered fenced.
#
exit 0
@@ -40,7 +40,7 @@ t_filter_dmesg()
	# mount and unmount spew a bunch
	re="$re|scoutfs.*client connected"
	re="$re|scoutfs.*client disconnected"
	re="$re|scoutfs.*server setting up"
	re="$re|scoutfs.*server starting"
	re="$re|scoutfs.*server ready"
	re="$re|scoutfs.*server accepted"
	re="$re|scoutfs.*server closing"
@@ -62,5 +62,22 @@ t_filter_dmesg()
	# in debugging kernels we can slow things down a bit
	re="$re|hrtimer: interrupt took .*"

	# fencing tests force unmounts and trigger timeouts
	re="$re|scoutfs .* forcing unmount"
	re="$re|scoutfs .* reconnect timed out"
	re="$re|scoutfs .* recovery timeout expired"
	re="$re|scoutfs .* fencing previous leader"
	re="$re|scoutfs .* reclaimed resources"
	re="$re|scoutfs .* quorum .* error"
	re="$re|scoutfs .* error reading quorum block"
	re="$re|scoutfs .* error .* writing quorum block"
	re="$re|scoutfs .* error .* while checking to delete inode"
	re="$re|scoutfs .* error .*writing btree blocks.*"
	re="$re|scoutfs .* error .*writing super block.*"
	re="$re|scoutfs .* error .* freeing merged btree blocks.*.looping commit del.*upd freeing item"
	re="$re|scoutfs .* error .* freeing merged btree blocks.*.final commit del.upd freeing item"
	re="$re|scoutfs .* error .*reading quorum block.*to update event.*"
	re="$re|scoutfs .* error.*server failed to bind to.*"

	egrep -v "($re)"
}
@@ -17,6 +17,17 @@ t_sync_seq_index()
	t_quiet sync
}

t_mount_rid()
{
	local nr="${1:-0}"
	local mnt="$(eval echo \$T_M$nr)"
	local rid

	rid=$(scoutfs statfs -s rid -p "$mnt")

	echo "$rid"
}

#
# Output the "f.$fsid.r.$rid" identifier string for the given mount
# number, 0 is used by default if none is specified.
@@ -132,6 +143,16 @@ t_umount()
	eval t_quiet umount \$T_M$nr
}

t_force_umount()
{
	local nr="$1"

	test "$nr" -lt "$T_NR_MOUNTS" || \
		t_fail "fs nr $nr invalid"

	eval t_quiet umount -f \$T_M$nr
}

#
# Attempt to mount all the configured mounts, assuming that they're
# not already mounted.
@@ -277,3 +298,67 @@ t_counter_diff_changed() {
		echo "counter $which didn't change" ||
		echo "counter $which changed"
}

#
# See if we can find a local mount with the caller's rid.
#
t_rid_is_mounted() {
	local rid="$1"
	local fr

	for fr in /sys/fs/scoutfs/*; do
		if [ "$(cat $fr/rid)" == "$rid" ]; then
			return 0
		fi
	done

	return 1
}

#
# A given mount is being fenced if any mount has a fence request pending
# for it which hasn't finished and been removed.
#
t_rid_is_fencing() {
	local rid="$1"
	local fr

	for fr in /sys/fs/scoutfs/*; do
		if [ -d "$fr/fence/$rid" ]; then
			return 0
		fi
	done

	return 1
}

#
# Wait until the mount identified by the first rid arg is not in any
# states specified by the remaining state description word args.
#
t_wait_if_rid_is() {
	local rid="$1"

	while ( [[ $* =~ mounted ]] && t_rid_is_mounted $rid ) ||
	      ( [[ $* =~ fencing ]] && t_rid_is_fencing $rid ) ; do
		sleep .5
	done
}

#
# Wait until any mount identifies itself as the elected leader. We can
# be waiting while tests mount and unmount so mounts may not be mounted
# at the test's expected mount points.
#
t_wait_for_leader() {
	local i

	while sleep .25; do
		for i in $(t_fs_nrs); do
			local ldr="$(t_sysfs_path $i 2>/dev/null)/quorum/is_leader"
			if [ "$(cat $ldr 2>/dev/null)" == "1" ]; then
				return
			fi
		done
	done
}
@@ -53,3 +53,5 @@ mv: cannot move ‘/mnt/test/test/basic-posix-consistency/dir/c/clobber’ to
== inode indexes match after syncing existing
== inode indexes match after copying and syncing
== inode indexes match after removing and syncing
== concurrent creates make one file
one-file
@@ -1,52 +1,2 @@
== create shared test file
== set and get xattrs between mount pairs while retrying
# file: /mnt/test/test/block-stale-reads/file
user.xat="1"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="2"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="3"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="4"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="5"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="6"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="7"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="8"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="9"

counter block_cache_remove_stale changed
counter block_cache_remove_stale changed
# file: /mnt/test/test/block-stale-reads/file
user.xat="10"

counter block_cache_remove_stale changed
== Issue scoutfs df to force block reads to trigger stale invalidation/retry
counter block_cache_remove_stale changed
1 tests/golden/client-unmount-recovery Normal file
@@ -0,0 +1 @@
== 60s of unmounting non-quorum clients during recovery
8 tests/golden/enospc Normal file
@@ -0,0 +1,8 @@
== prepare directories and files
== fallocate until enospc
== remove all the files and verify free data blocks
== make small meta fs
== create large xattrs until we fill up metadata
== remove files with xattrs after enospc
== make sure we can create again
== cleanup small meta fs
5 tests/golden/fence-and-reclaim Normal file
@@ -0,0 +1,5 @@
== make sure all mounts can see each other
== force unmount one client, connection timeout, fence nop, mount
== force unmount all non-server, connection timeout, fence nop, mount
== force unmount server, quorum elects new leader, fence nop, mount
== force unmount everything, new server fences all previous
4 tests/golden/orphan-inodes Normal file
@@ -0,0 +1,4 @@
== test our inode existence function
== unlinked and opened inodes still exist
== orphan from failed evict deletion is picked up
== orphaned inos in all mounts all deleted
2 tests/golden/renameat2-noreplace Normal file
@@ -0,0 +1,2 @@
=== renameat2 noreplace flag test
=== run two asynchronous calls to renameat2 NOREPLACE
27 tests/golden/resize-devices Normal file
@@ -0,0 +1,27 @@
== make initial small fs
== 0s do nothing
== shrinking fails
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
== existing sizes do nothing
== growing outside device fails
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
== resizing meta works
== resizing data works
== shrinking back fails
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
resize_devices ioctl failed: Invalid argument (22)
scoutfs: resize-devices failed: Invalid argument (22)
== resizing again does nothing
== resizing to full works
== cleanup extra fs
@@ -16,3 +16,4 @@ setfattr: /mnt/test/test/simple-xattr-unit/file: Numerical result out of range
setfattr: /mnt/test/test/simple-xattr-unit/file: Argument list too long
=== good length boundaries
=== 500 random lengths
=== alternate val size between interesting sizes
@@ -2,6 +2,7 @@
== update existing xattr
== remove an xattr
== remove xattr with files
== trigger small log merges by rotating single block with unmount
== create entries in current log
== delete small fraction
== remove files
30 tests/golden/totl-xattr-tag Normal file
@@ -0,0 +1,30 @@
== single file
1.2.3 = 1, 1
4.5.6 = 1, 1
== multiple files add up
1.2.3 = 2, 2
4.5.6 = 2, 2
== removing xattr updates total
1.2.3 = 2, 2
4.5.6 = 1, 1
== updating xattr updates total
1.2.3 = 11, 2
4.5.6 = 1, 1
== removing files updates total
1.2.3 = 10, 1
== multiple files/names in one transaction
1.2.3 = 55, 10
== testing invalid names
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
== testing invalid values
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
setfattr: /mnt/test/test/totl-xattr-tag/invalid: Invalid argument
== larger population that could merge
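The "testing invalid values" output above pins down the accepted value format: a bare decimal u64 with no sign, no decimal point, and no trailing junk. This userspace sketch is only a guess at the shape of the kernel's parse_totl_u64(), written to reproduce those accept/reject decisions; the helper name parse_strict_u64 is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

/* accept only a bare decimal u64: no sign, no junk, no empty string */
static int parse_strict_u64(const char *s, size_t len, uint64_t *res)
{
	char buf[21];	/* 20 digits of UINT64_MAX plus NUL */
	char *end;

	if (len == 0 || len >= sizeof(buf) || s[0] < '0' || s[0] > '9')
		return -EINVAL;

	memcpy(buf, s, len);
	buf[len] = '\0';

	errno = 0;
	*res = strtoull(buf, &end, 10);
	if (errno || *end != '\0')
		return -EINVAL;	/* overflow or trailing junk like "10junk" */
	return 0;
}

int main(void)
{
	/* the same values the golden output shows being rejected */
	const char *vals[] = { "10", "+1", "10.", "-", "junk10", "10junk" };
	uint64_t v;
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%-6s -> %s\n", vals[i],
		       parse_strict_u64(vals[i], strlen(vals[i]), &v) ?
		       "EINVAL" : "ok");
	return 0;
}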
@@ -9,6 +9,8 @@ generic/011
generic/013
generic/014
generic/020
generic/023
generic/024
generic/028
generic/032
generic/034
@@ -82,6 +84,7 @@ generic/016
generic/018
generic/021
generic/022
generic/025
generic/026
generic/031
generic/033
@@ -93,6 +96,7 @@ generic/060
generic/061
generic/063
generic/064
generic/078
generic/079
generic/081
generic/082
@@ -278,4 +282,4 @@ shared/004
shared/032
shared/051
shared/289
Passed all 73 tests
Passed all 75 tests
@@ -18,10 +18,15 @@ die() {
	exit 1
}

timestamp()
{
	date '+%F %T.%N'
}

# output a message with a timestamp to the run.log
log()
{
	echo "[$(date '+%F %T.%N')] $*" >> "$T_RESULTS/run.log"
	echo "[$(timestamp)] $*" >> "$T_RESULTS/run.log"
}

# run a logged command, exiting if it fails
@@ -66,6 +71,7 @@ $(basename $0) options:
    -X | xfstests git repo. Used by tests/xfstests.sh.
    -x | xfstests git branch to checkout and track.
    -y | xfstests ./check additional args
    -z <nr> | set data-alloc-zone-blocks in mkfs
EOF
}

@@ -169,6 +175,11 @@ while true; do
		T_XFSTESTS_ARGS="$2"
		shift
		;;
	-z)
		test -n "$2" || die "-z must have nr blocks argument"
		T_DATA_ALLOC_ZONE_BLOCKS="-z $2"
		shift
		;;
	-h|-\?|--help)
		show_help
		exit 1
@@ -216,8 +227,9 @@ test "$T_QUORUM" -le "$T_NR_MOUNTS" || \
	die "-q quorum members must not be greater than -n mounts"

# top level paths
T_KMOD=$(realpath "$(dirname $0)/../kmod")
T_UTILS=$(realpath "$T_KMOD/../utils")
T_TESTS=$(realpath "$(dirname $0)")
T_KMOD=$(realpath "$T_TESTS/../kmod")
T_UTILS=$(realpath "$T_TESTS/../utils")

test -d "$T_KMOD" || die "kmod/ repo dir $T_KMOD not directory"
test -d "$T_UTILS" || die "utils/ repo dir $T_UTILS not directory"
@@ -243,17 +255,20 @@ test -e "$T_RESULTS" || mkdir -p "$T_RESULTS"
test -d "$T_RESULTS" || \
	die "$T_RESULTS dir is not a directory"

# might as well build our stuff with all cpus, assuming idle system
MAKE_ARGS="-j $(getconf _NPROCESSORS_ONLN)"

# build kernel module
msg "building kmod/ dir $T_KMOD"
cmd cd "$T_KMOD"
cmd make
cmd make $MAKE_ARGS
cmd sync
cmd cd -

# build utils
msg "building utils/ dir $T_UTILS"
cmd cd "$T_UTILS"
cmd make
cmd make $MAKE_ARGS
cmd sync
cmd cd -

@@ -270,7 +285,7 @@ fi

# building our test binaries
msg "building test binaries"
cmd make
cmd make $MAKE_ARGS

# set any options implied by others
test -n "$T_MKFS" && T_UNMOUNT=1
@@ -319,7 +334,8 @@ if [ -n "$T_MKFS" ]; then
	done

	msg "making new filesystem with $T_QUORUM quorum members"
	cmd scoutfs mkfs -f $quo "$T_META_DEVICE" "$T_DATA_DEVICE"
	cmd scoutfs mkfs -f $quo $T_DATA_ALLOC_ZONE_BLOCKS \
		"$T_META_DEVICE" "$T_DATA_DEVICE"
fi

if [ -n "$T_INSMOD" ]; then
@@ -360,6 +376,39 @@ cmd cat /sys/kernel/debug/tracing/set_event
cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
	/proc/sys/kernel/ftrace_dump_on_oops

#
# Build a fenced config that runs scripts out of the repository rather
# than the default system directory
#
conf="$T_RESULTS/scoutfs-fenced.conf"
cat > $conf << EOF
SCOUTFS_FENCED_DELAY=1
SCOUTFS_FENCED_RUN=$T_TESTS/fenced-local-force-unmount.sh
SCOUTFS_FENCED_RUN_ARGS=""
EOF
export SCOUTFS_FENCED_CONFIG_FILE="$conf"

#
# Run the agent in the background, log its output, and kill it if we
# exit
#
fenced_log()
{
	echo "[$(timestamp)] $*" >> "$T_RESULTS/fenced.stdout.log"
}
fenced_pid=""
kill_fenced()
{
	if test -n "$fenced_pid" -a -d "/proc/$fenced_pid" ; then
		fenced_log "killing fenced pid $fenced_pid"
		kill "$fenced_pid"
	fi
}
trap kill_fenced EXIT
$T_UTILS/fenced/scoutfs-fenced > "$T_RESULTS/fenced.stdout.log" 2> "$T_RESULTS/fenced.stderr.log" &
fenced_pid=$!
fenced_log "started fenced pid $fenced_pid in the background"

#
# mount concurrently so that a quorum is present to elect the leader and
# start a server.
@@ -7,8 +7,10 @@ simple-release-extents.sh
setattr_more.sh
offline-extent-waiting.sh
move-blocks.sh
enospc.sh
srch-basic-functionality.sh
simple-xattr-unit.sh
totl-xattr-tag.sh
lock-refleak.sh
lock-shrink-consistency.sh
lock-pr-cw-conflict.sh
@@ -27,9 +29,14 @@ lock-ex-race-processes.sh
cross-mount-data-free.sh
persistent-item-vers.sh
setup-error-teardown.sh
resize-devices.sh
fence-and-reclaim.sh
orphan-inodes.sh
mount-unmount-race.sh
client-unmount-recovery.sh
createmany-parallel-mounts.sh
archive-light-cycle.sh
block-stale-reads.sh
inode-deletion.sh
renameat2-noreplace.sh
xfstests.sh
113 tests/src/create_xattr_loop.c Normal file
@@ -0,0 +1,113 @@
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/xattr.h>
#include <ctype.h>
#include <string.h>
#include <errno.h>
#include <limits.h>

static void exit_usage(void)
{
	printf(" -h/-? output this usage message and exit\n"
	       " -c <count> number of xattrs to create\n"
	       " -n <string> xattr name prefix, -NR is appended\n"
	       " -p <path> string with path to file with xattrs\n"
	       " -s <size> xattr value size\n");
	exit(1);
}

int main(int argc, char **argv)
{
	char *pref = NULL;
	char *path = NULL;
	char *val;
	char *name;
	unsigned long long count = 0;
	unsigned long long size = 0;
	unsigned long long i;
	int ret;
	int c;

	while ((c = getopt(argc, argv, "+c:n:p:s:")) != -1) {

		switch (c) {
		case 'c':
			count = strtoull(optarg, NULL, 0);
			break;
		case 'n':
			pref = strdup(optarg);
			break;
		case 'p':
			path = strdup(optarg);
			break;
		case 's':
			size = strtoull(optarg, NULL, 0);
			break;
		case '?':
			printf("unknown argument: %c\n", optopt);
			/* fall through to usage */
		case 'h':
			exit_usage();
		}
	}

	if (count == 0) {
		printf("specify count of xattrs to create with -c\n");
		exit(1);
	}

	if (count == ULLONG_MAX) {
		printf("invalid -c count\n");
		exit(1);
	}

	if (size == 0) {
		printf("specify xattrs value size with -s\n");
		exit(1);
	}

	if (size == ULLONG_MAX || size < 2) {
		printf("invalid -s size\n");
		exit(1);
	}

	if (path == NULL) {
		printf("specify path to file with -p\n");
		exit(1);
	}

	if (pref == NULL) {
		printf("specify xattr name prefix string with -n\n");
		exit(1);
	}

	ret = snprintf(NULL, 0, "%s-%llu", pref, ULLONG_MAX) + 1;
	name = malloc(ret);
	if (!name) {
		printf("couldn't allocate xattr name buffer\n");
		exit(1);
	}

	val = malloc(size);
	if (!val) {
		printf("couldn't allocate xattr value buffer\n");
		exit(1);
	}

	memset(val, 'a', size - 1);
	val[size - 1] = '\0';

	for (i = 0; i < count; i++) {
		sprintf(name, "%s-%llu", pref, i);

		ret = setxattr(path, name, val, size, 0);
		if (ret) {
			printf("returned %d errno %d (%s)\n",
			       ret, errno, strerror(errno));
			return 1;
		}
	}

	return 0;
}
93 tests/src/dumb_renameat2.c Normal file
@@ -0,0 +1,93 @@
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>

#ifndef RENAMEAT2_EXIST
#include <unistd.h>
#include <sys/syscall.h>

#if !defined(SYS_renameat2) && defined(__x86_64__)
#define SYS_renameat2 316 /* from arch/x86/entry/syscalls/syscall_64.tbl */
#endif

static int renameat2(int olddfd, const char *old_dir,
		     int newdfd, const char *new_dir,
		     unsigned int flags)
{
#ifdef SYS_renameat2
	return syscall(SYS_renameat2, olddfd, old_dir, newdfd, new_dir, flags);
#else
	errno = ENOSYS;
	return -1;
#endif
}
#endif

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite newpath of rename */
#endif
#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1) /* Exchange oldpath and newpath */
#endif
#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2) /* Whiteout oldpath */
#endif

static void exit_usage(char **argv)
{
	fprintf(stderr,
		"usage: %s [-n|-x|-w] old_path new_path\n"
		" -n noreplace\n"
		" -x exchange\n"
		" -w whiteout\n", argv[0]);

	exit(1);
}

int main(int argc, char **argv)
{
	const char *old_path = NULL;
	const char *new_path = NULL;
	unsigned int flags = 0;
	int ret;
	int c;

	for (c = 1; c < argc; c++) {
		if (argv[c][0] == '-') {
			switch (argv[c][1]) {
			case 'n':
				flags |= RENAME_NOREPLACE;
				break;
			case 'x':
				flags |= RENAME_EXCHANGE;
				break;
			case 'w':
				flags |= RENAME_WHITEOUT;
				break;
			default:
				exit_usage(argv);
			}
		} else if (!old_path) {
			old_path = argv[c];
		} else if (!new_path) {
			new_path = argv[c];
		} else {
			exit_usage(argv);
		}
	}

	if (!old_path || !new_path) {
		printf("specify the correct directory path\n");
		errno = ENOENT;
		return 1;
	}

	ret = renameat2(AT_FDCWD, old_path, AT_FDCWD, new_path, flags);
	if (ret == -1) {
		perror("Error");
		return 1;
	}

	return 0;
}
@@ -48,8 +48,9 @@ char buf[SZ];

int main(int argc, char **argv)
{
	struct scoutfs_ioctl_release ioctl_args = {0};
	struct scoutfs_ioctl_release rel = {0};
	struct scoutfs_ioctl_move_blocks mb;
	struct scoutfs_ioctl_stat_more stm;
	struct sub_tmp_info sub_tmps[8];
	int tot_size = 0;
	char *dest_file;
@@ -111,12 +112,19 @@ int main(int argc, char **argv)
		exit(1);
	}

	// release everything in dest file
	ioctl_args.offset = 0;
	ioctl_args.length = tot_size;
	ioctl_args.data_version = 0;
	// get current data_version after fallocate's size extensions
	ret = ioctl(dest_fd, SCOUTFS_IOC_STAT_MORE, &stm);
	if (ret < 0) {
		perror("stat_more ioctl error");
		exit(1);
	}

	ret = ioctl(dest_fd, SCOUTFS_IOC_RELEASE, &ioctl_args);
	// release everything in dest file
	rel.offset = 0;
	rel.length = tot_size;
	rel.data_version = stm.data_version;

	ret = ioctl(dest_fd, SCOUTFS_IOC_RELEASE, &rel);
	if (ret < 0) {
		perror("error");
		exit(1);
@@ -130,7 +138,7 @@ int main(int argc, char **argv)
		mb.from_off = 0;
		mb.len = sub_tmp->length;
		mb.to_off = sub_tmp->offset;
		mb.data_version = 0;
		mb.data_version = stm.data_version;
		mb.flags = SCOUTFS_IOC_MB_STAGE;

		ret = ioctl(dest_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
@@ -197,4 +197,13 @@ scoutfs walk-inodes -p "$T_M0" -- data_seq 0 -1 > "$T_TMP.0"
scoutfs walk-inodes -p "$T_M1" -- data_seq 0 -1 > "$T_TMP.1"
diff -u "$T_TMP.0" "$T_TMP.1"

echo "== concurrent creates make one file"
mkdir "$T_D0/concurrent"
for i in $(t_fs_nrs); do
	eval p="\$T_D${i}/concurrent/one-file"
	touch "$p" 2>&1 > "$T_TMP.multi-create.$i" &
done
wait
ls "$T_D0/concurrent"

t_pass
@@ -5,57 +5,18 @@
# persistent blocks to create stable block reading scenarios. Instead
# we use triggers to exercise how readers encounter stale blocks.
#
# Trigger retries in the block cache by calling scoutfs df
# which in turn will call scoutfs_ioctl_alloc_detail. This
# is guaranteed to exist, which will force block cache reads.

t_require_commands touch setfattr getfattr
echo "== Issue scoutfs df to force block reads to trigger stale invalidation/retry"
nr=0

inc_wrap_fs_nr()
{
	local nr="$(($1 + 1))"
	old=$(t_counter block_cache_remove_stale $nr)
	t_trigger_arm_silent block_remove_stale $nr

	if [ "$nr" == "$T_NR_MOUNTS" ]; then
		nr=0
	fi
	scoutfs df -p "$T_M0" > /dev/null

	echo $nr
}

GETFATTR="getfattr --absolute-names"
SETFATTR="setfattr"

echo "== create shared test file"
touch "$T_D0/file"
$SETFATTR -n user.xat -v 0 "$T_D0/file"

#
# Trigger retries in the block cache as we bounce xattr values around
# between sequential pairs of mounts. This is a little silly because if
# either of the mounts are the server then they'll almost certainly have
# their trigger fired prematurely by message handling btree calls while
# working with the t_ helpers long before we work with the xattrs. But
# the block cache stale retry path is still being exercised.
#
echo "== set and get xattrs between mount pairs while retrying"
set_nr=0
get_nr=$(inc_wrap_fs_nr $set_nr)

for i in $(seq 1 10); do
	eval set_file="\$T_D${set_nr}/file"
	eval get_file="\$T_D${get_nr}/file"

	old_set=$(t_counter block_cache_remove_stale $set_nr)
	old_get=$(t_counter block_cache_remove_stale $get_nr)

	t_trigger_arm_silent block_remove_stale $set_nr
	t_trigger_arm_silent block_remove_stale $get_nr

	$SETFATTR -n user.xat -v $i "$set_file"
	$GETFATTR -n user.xat "$get_file" 2>&1 | t_filter_fs

	t_counter_diff_changed block_cache_remove_stale $old_set $set_nr
	t_counter_diff_changed block_cache_remove_stale $old_get $get_nr

	set_nr="$get_nr"
	get_nr=$(inc_wrap_fs_nr $set_nr)
done
t_counter_diff_changed block_cache_remove_stale $old $nr

t_pass
61 tests/tests/client-unmount-recovery.sh Normal file
@@ -0,0 +1,61 @@
#
# Unmount Server and unmount a client as it's replaying to a remaining server
#

majority_nr=$(t_majority_count)
quorum_nr=$T_QUORUM

test "$quorum_nr" == "$majority_nr" && \
	t_skip "all quorum members make up majority, need more mounts to unmount"

test "$T_NR_MOUNTS" -lt "$T_QUORUM" && \
	t_skip "Need enough non-quorum clients to unmount"

for i in $(t_fs_nrs); do
	mounted[$i]=1
done

LENGTH=60
echo "== ${LENGTH}s of unmounting non-quorum clients during recovery"
END=$((SECONDS + LENGTH))
while [ "$SECONDS" -lt "$END" ]; do
	sv=$(t_server_nr)
	rid=$(t_mount_rid $sv)
	echo "sv $sv rid $rid" >> "$T_TMP.log"
	sync
	t_umount $sv &

	for i in $(t_fs_nrs); do
		if [ "$i" -ge "$quorum_nr" ]; then
			t_umount $i &
			echo "umount $i pid $pid quo $quorum_nr" \
				>> $T_TMP.log
			mounted[$i]=0
		fi
	done

	wait

	t_mount $sv &
	for i in $(t_fs_nrs); do
		if [ "${mounted[$i]}" == 0 ]; then
			t_mount $i &
		fi
	done

	wait

	declare RID_LIST=$(cat /sys/fs/scoutfs/*/rid | sort -u)
	read -a rid_arr <<< $RID_LIST

	declare LOCK_LIST=$(cut -d' ' -f 5 /sys/kernel/debug/scoutfs/*/server_locks | sort -u)
	read -a lock_arr <<< $LOCK_LIST

	for i in "${lock_arr[@]}"; do
		if [[ ! " ${rid_arr[*]} " =~ " $i " ]]; then
			t_fail "RID($i): exists when not mounted"
		fi
	done
done

t_pass
100 tests/tests/enospc.sh Normal file
@@ -0,0 +1,100 @@
#
# test hitting enospc by filling with data or metadata and
# then recovering by removing what we filled.
#

#    Type  Size    Total     Used     Free  Use%
#MetaData 64KB  1048576    32782  1015794     3
#    Data  4KB 16777152        0 16777152     0
free_blocks() {
	local md="$1"
	local mnt="$2"
	scoutfs df -p "$mnt" | awk '($1 == "'$md'") { print $5; exit }'
}

t_require_commands scoutfs stat fallocate createmany

echo "== prepare directories and files"
for n in $(t_fs_nrs); do
	eval path="\$T_D${n}/dir-$n/file-$n"
	mkdir -p $(dirname $path)
	touch $path
done
sync

echo "== fallocate until enospc"
before=$(free_blocks Data "$T_M0")
finished=0
while [ $finished != 1 ]; do
	for n in $(t_fs_nrs); do
		eval path="\$T_D${n}/dir-$n/file-$n"
		off=$(stat -c "%s" "$path")

		LC_ALL=C fallocate -o $off -l 128MiB "$path" > $T_TMP.fallocate 2>&1
		err="$?"

		if grep -qi "no space" $T_TMP.fallocate; then
			finished=1
			break
		fi
		if [ "$err" != "0" ]; then
			t_fail "fallocate failed with $err"
		fi
	done
done

echo "== remove all the files and verify free data blocks"
for n in $(t_fs_nrs); do
	eval dir="\$T_D${n}/dir-$n"
	rm -rf "$dir"
done
sync
after=$(free_blocks Data "$T_M0")
# nothing else should be modifying data blocks
test "$before" == "$after" || \
	t_fail "$after free data blocks after rm, expected $before"

# XXX this is all pretty manual, would be nice to have helpers
echo "== make small meta fs"
# meta device just big enough for reserves and the metadata we'll fill
scoutfs mkfs -A -f -Q 0,127.0.0.1,53000 -m 10G "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 || \
	t_fail "mkfs failed"
SCR="/mnt/scoutfs.enospc"
mkdir -p "$SCR"
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 \
	"$T_EX_DATA_DEV" "$SCR"

echo "== create large xattrs until we fill up metadata"
mkdir -p "$SCR/xattrs"

for f in $(seq 1 100000); do
	file="$SCR/xattrs/file-$f"
	touch "$file"

	LC_ALL=C create_xattr_loop -c 1000 -n user.scoutfs-enospc -p "$file" -s 65535 > $T_TMP.cxl 2>&1
	err="$?"

	if grep -qi "no space" $T_TMP.cxl; then
		echo "enospc at f $f" >> $T_TMP.cxl
		break
	fi
	if [ "$err" != "0" ]; then
		t_fail "create_xattr_loop failed with $err"
	fi
done

echo "== remove files with xattrs after enospc"
rm -rf "$SCR/xattrs"

echo "== make sure we can create again"
file="$SCR/file-after"
touch $file
setfattr -n user.scoutfs-enospc -v 1 "$file"
sync
rm -f "$file"

echo "== cleanup small meta fs"
umount "$SCR"
rmdir "$SCR"

t_pass
127 tests/tests/fence-and-reclaim.sh Normal file
@@ -0,0 +1,127 @@
#
# Fence nodes and reclaim their resources.
#

t_require_commands sleep touch grep sync scoutfs
t_require_mounts 2

#
# Make sure that all mounts can read the results of a write from each
# mount. And make sure that the greatest of all the written seqs is
# visible after the writes were committed by remote reads.
#
check_read_write()
{
	local expected
	local greatest=0
	local seq
	local path
	local saw
	local w
	local r

	for w in $(t_fs_nrs); do
		expected="$w wrote at $(date --rfc-3339=ns)"
		eval path="\$T_D${w}/written"
		echo "$expected" > "$path"

		seq=$(scoutfs stat -s meta_seq $path)
		if [ "$seq" -gt "$greatest" ]; then
			greatest=$seq
		fi

		for r in $(t_fs_nrs); do
			eval path="\$T_D${r}/written"
			saw=$(cat "$path")
			if [ "$saw" != "$expected" ]; then
				echo "mount $r read '$saw' after mount $w wrote '$expected'"
			fi
		done
	done

	seq=$(scoutfs statfs -s committed_seq -p $T_D0)
	if [ "$seq" -lt "$greatest" ]; then
		echo "committed_seq $seq less than greatest $greatest"
	fi
}

echo "== make sure all mounts can see each other"
check_read_write

echo "== force unmount one client, connection timeout, fence nop, mount"
cl=$(t_first_client_nr)
sv=$(t_server_nr)
rid=$(t_mount_rid $cl)
echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"
sync
t_force_umount $cl
# wait for client reconnection to timeout
while grep -q $rid $(t_debugfs_path $sv)/connections; do
	sleep .5
done
while t_rid_is_fencing $rid; do
	sleep .5
done
t_mount $cl
check_read_write

echo "== force unmount all non-server, connection timeout, fence nop, mount"
sv=$(t_server_nr)
pattern="nonsense"
sync
for cl in $(t_fs_nrs); do
	if [ $cl == $sv ]; then
		continue;
	fi

	rid=$(t_mount_rid $cl)
	pattern="$pattern|$rid"
	echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"

	t_force_umount $cl
done

# wait for all client reconnections to timeout
while egrep -q "($pattern)" $(t_debugfs_path $sv)/connections; do
	sleep .5
done
# wait for all fence requests to complete
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
# remount all the clients
for cl in $(t_fs_nrs); do
	if [ $cl == $sv ]; then
		continue;
	fi
	t_mount $cl
done
check_read_write

echo "== force unmount server, quorum elects new leader, fence nop, mount"
sv=$(t_server_nr)
rid=$(t_mount_rid $sv)
echo "sv $sv rid $rid" >> "$T_TMP.log"
sync
t_force_umount $sv
t_wait_for_leader
# wait until new server is done fencing unmounted leader rid
while t_rid_is_fencing $rid; do
	sleep .5
done
t_mount $sv
check_read_write

echo "== force unmount everything, new server fences all previous"
sync
for nr in $(t_fs_nrs); do
	t_force_umount $nr
done
t_mount_all
# wait for all fence requests to complete
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
check_read_write

t_pass
@@ -23,9 +23,7 @@ else
	NR_MNTS=$T_NR_MOUNTS
fi

# test until final op mount dir wraps
while [ ${op_mnt[$NR_OPS]} == 0 ]; do

while : ; do
	# sequentially perform each op from its mount dir
	for op in $(seq 0 $((NR_OPS - 1))); do
		m=${op_mnt[$op]}
@@ -45,7 +43,7 @@ while [ ${op_mnt[$NR_OPS]} == 0 ]; do

	# advance through mnt nrs for each op
	i=0
	while [ ${op_mnt[$NR_OPS]} == 0 ]; do
	while [ $i -lt $NR_OPS ]; do
		((op_mnt[$i]++))
		if [ ${op_mnt[$i]} -ge $NR_MNTS ]; then
			op_mnt[$i]=0
@@ -54,6 +52,9 @@ while [ ${op_mnt[$NR_OPS]} == 0 ]; do
			break
		fi
	done

	# done when the last op's mnt nr wrapped
	[ $i -ge $NR_OPS ] && break
done

t_pass
78 tests/tests/orphan-inodes.sh Normal file
@@ -0,0 +1,78 @@
#
# make sure we clean up orphaned inodes
#

t_require_commands sleep touch sync stat handle_cat kill rm
t_require_mounts 2

#
# usually bash prints an annoying output message when jobs
# are killed. We can avoid that by redirecting stderr for
# the bash process when it reaps the jobs that are killed.
#
silent_kill() {
	exec {ERR}>&2 2>/dev/null
	kill "$@"
	wait "$@"
	exec 2>&$ERR {ERR}>&-
}

#
# We don't have a great way to test that inode items still exist. We
# don't prevent opening handles with nlink 0 today, so we'll use that.
# This would have to change to some other method.
#
inode_exists()
{
	local ino="$1"

	scoutfs get-allocated-inos -i "$ino" -s -p "$T_M0" > $T_TMP.inos.log 2>&1
	test "$?" == 0 -a "$(head -1 $T_TMP.inos.log)" == "$ino"
}

echo "== test our inode existence function"
path="$T_D0/file"
touch "$path"
ino=$(stat -c "%i" "$path")
inode_exists $ino || echo "$ino didn't exist"

echo "== unlinked and opened inodes still exist"
sleep 1000000 < "$path" &
pid="$!"
rm -f "$path"
inode_exists $ino || echo "$ino didn't exist"

echo "== orphan from failed evict deletion is picked up"
# pending kill signal stops evict from getting locks and deleting
silent_kill $pid
sleep 55
inode_exists $ino && echo "$ino still exists"

echo "== orphaned inos in all mounts all deleted"
pids=""
inos=""
for nr in $(t_fs_nrs); do
	eval path="\$T_D${nr}/file-$nr"
	touch "$path"
	inos="$inos $(stat -c %i $path)"
	sleep 1000000 < "$path" &
	pids="$pids $!"
	rm -f "$path"
done
sync
silent_kill $pids
for nr in $(t_fs_nrs); do
	t_force_umount $nr
done
t_mount_all
# wait for all fence requests to complete
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
# wait for orphan scans to run
sleep 55
for ino in $inos; do
	inode_exists $ino && echo "$ino still exists"
done

t_pass
37 tests/tests/renameat2-noreplace.sh Normal file
@@ -0,0 +1,37 @@
#
# simple renameat2 NOREPLACE unit test
#

t_require_commands dumb_renameat2
t_require_mounts 2

echo "=== renameat2 noreplace flag test"

# give each mount their own dir (lock group) to minimize create contention
mkdir $T_M0/dir0
mkdir $T_M1/dir1

echo "=== run two asynchronous calls to renameat2 NOREPLACE"
for i in $(seq 0 100); do
	# prepare inputs in isolation
	touch "$T_M0/dir0/old0"
	touch "$T_M1/dir1/old1"

	# race doing noreplace renames, both can't succeed
	dumb_renameat2 -n "$T_M0/dir0/old0" "$T_M0/dir0/sharednew" 2> /dev/null &
	pid0=$!
	dumb_renameat2 -n "$T_M1/dir1/old1" "$T_M1/dir0/sharednew" 2> /dev/null &
	pid1=$!

	wait $pid0
	rc0=$?
	wait $pid1
	rc1=$?

	test "$rc0" == 0 -a "$rc1" == 0 && t_fail "both renames succeeded"

	# blow away possible files for either race outcome
	rm -f "$T_M0/dir0/old0" "$T_M1/dir1/old1" "$T_M0/dir0/sharednew" "$T_M1/dir1/sharednew"
done

t_pass
149 tests/tests/resize-devices.sh Normal file
@@ -0,0 +1,149 @@
#
# Some basic tests of online resizing metadata and data devices.
#

statfs_total() {
	local single="total_$1_blocks"
	local mnt="$2"

	scoutfs statfs -s $single -p "$mnt"
}

df_free() {
	local md="$1"
	local mnt="$2"

	scoutfs df -p "$mnt" | awk '($1 == "'$md'") { print $5; exit }'
}

same_totals() {
	cur_meta_tot=$(statfs_total meta "$SCR")
	cur_data_tot=$(statfs_total data "$SCR")

	test "$cur_meta_tot" == "$exp_meta_tot" || \
		t_fail "cur total_meta_blocks $cur_meta_tot != expected $exp_meta_tot"
	test "$cur_data_tot" == "$exp_data_tot" || \
		t_fail "cur total_data_blocks $cur_data_tot != expected $exp_data_tot"
}

#
# make sure that the specified devices have grown by doubling. The
# total blocks can be tested exactly but the df reported total needs
# some slop to account for reserved blocks and concurrent allocation.
#
devices_grew() {
	cur_meta_tot=$(statfs_total meta "$SCR")
	cur_data_tot=$(statfs_total data "$SCR")
	cur_meta_df=$(df_free MetaData "$SCR")
	cur_data_df=$(df_free Data "$SCR")

	local grow_meta_tot=$(echo "$exp_meta_tot * 2" | bc)
	local grow_data_tot=$(echo "$exp_data_tot * 2" | bc)
	local grow_meta_df=$(echo "($exp_meta_df * 1.95)/1" | bc)
	local grow_data_df=$(echo "($exp_data_df * 1.95)/1" | bc)

	if [ "$1" == "meta" ]; then
		test "$cur_meta_tot" == "$grow_meta_tot" || \
			t_fail "cur total_meta_blocks $cur_meta_tot != grown $grow_meta_tot"
		test "$cur_meta_df" -lt "$grow_meta_df" && \
			t_fail "cur meta df total $cur_meta_df < grown $grow_meta_df"
		exp_meta_tot=$cur_meta_tot
		exp_meta_df=$cur_meta_df
		shift
	fi

	if [ "$1" == "data" ]; then
		test "$cur_data_tot" == "$grow_data_tot" || \
			t_fail "cur total_data_blocks $cur_data_tot != grown $grow_data_tot"
		test "$cur_data_df" -lt "$grow_data_df" && \
			t_fail "cur data df total $cur_data_df < grown $grow_data_df"
		exp_data_tot=$cur_data_tot
		exp_data_df=$cur_data_df
	fi
}

# first calculate small mkfs based on device size
size_meta=$(blockdev --getsize64 "$T_EX_META_DEV")
size_data=$(blockdev --getsize64 "$T_EX_DATA_DEV")
quarter_meta=$(echo "$size_meta / 4" | bc)
quarter_data=$(echo "$size_data / 4" | bc)

# XXX this is all pretty manual, would be nice to have helpers
echo "== make initial small fs"
scoutfs mkfs -A -f -Q 0,127.0.0.1,53000 -m $quarter_meta -d $quarter_data \
	"$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 || \
	t_fail "mkfs failed"
SCR="/mnt/scoutfs.enospc"
mkdir -p "$SCR"
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 \
	"$T_EX_DATA_DEV" "$SCR"

# then calculate sizes based on blocks that mkfs used
quarter_meta=$(echo "$(statfs_total meta "$SCR") * 64 * 1024" | bc)
quarter_data=$(echo "$(statfs_total data "$SCR") * 4 * 1024" | bc)
whole_meta=$(echo "$quarter_meta * 4" | bc)
whole_data=$(echo "$quarter_data * 4" | bc)
outsize_meta=$(echo "$whole_meta * 2" | bc)
outsize_data=$(echo "$whole_data * 2" | bc)
half_meta=$(echo "$whole_meta / 2" | bc)
half_data=$(echo "$whole_data / 2" | bc)
shrink_meta=$(echo "$quarter_meta / 2" | bc)
shrink_data=$(echo "$quarter_data / 2" | bc)

# and save expected values for checks
exp_meta_tot=$(statfs_total meta "$SCR")
exp_meta_df=$(df_free MetaData "$SCR")
exp_data_tot=$(statfs_total data "$SCR")
exp_data_df=$(df_free Data "$SCR")

echo "== 0s do nothing"
scoutfs resize-devices -p "$SCR"
scoutfs resize-devices -p "$SCR" -m 0
scoutfs resize-devices -p "$SCR" -d 0
scoutfs resize-devices -p "$SCR" -m 0 -d 0

echo "== shrinking fails"
scoutfs resize-devices -p "$SCR" -m $shrink_meta
scoutfs resize-devices -p "$SCR" -d $shrink_data
scoutfs resize-devices -p "$SCR" -m $shrink_meta -d $shrink_data
same_totals

echo "== existing sizes do nothing"
scoutfs resize-devices -p "$SCR" -m $quarter_meta
scoutfs resize-devices -p "$SCR" -d $quarter_data
scoutfs resize-devices -p "$SCR" -m $quarter_meta -d $quarter_data
same_totals

echo "== growing outside device fails"
scoutfs resize-devices -p "$SCR" -m $outsize_meta
scoutfs resize-devices -p "$SCR" -d $outsize_data
scoutfs resize-devices -p "$SCR" -m $outsize_meta -d $outsize_data
same_totals

echo "== resizing meta works"
scoutfs resize-devices -p "$SCR" -m $half_meta
devices_grew meta

echo "== resizing data works"
scoutfs resize-devices -p "$SCR" -d $half_data
devices_grew data

echo "== shrinking back fails"
scoutfs resize-devices -p "$SCR" -m $quarter_meta
scoutfs resize-devices -p "$SCR" -m $quarter_data
same_totals

echo "== resizing again does nothing"
scoutfs resize-devices -p "$SCR" -m $half_meta
scoutfs resize-devices -p "$SCR" -m $half_data
same_totals

echo "== resizing to full works"
scoutfs resize-devices -p "$SCR" -m $whole_meta -d $whole_data
devices_grew meta data

echo "== cleanup extra fs"
umount "$SCR"
rmdir "$SCR"

t_pass
@@ -46,6 +46,35 @@ print_and_run() {
	"$@" || echo "returned nonzero status: $?"
}

# fill a buffer with strings that identify their byte offset
offs=""
for o in $(seq 0 7 $((65535 - 7))); do
	offs+="$(printf "[%5u]" $o)"
done

change_val_sizes() {
	local name="$1"
	local file="$2"
	local from="$3"
	local to="$4"

	while : ; do
		setfattr -x "$name" "$file" > /dev/null 2>&1
		setfattr -n "$name" -v "${offs:0:$from}" "$file"
		setfattr -n "$name" -v "${offs:0:$to}" "$file"
		if ! diff -u <(echo -n "${offs:0:$to}") <(getfattr --absolute-names --only-values -n "$name" $file) ; then
			echo "setting $name from $from to $to failed"
		fi

		if [ $from == $3 ]; then
			from=$4
			to=$3
		else
			break
		fi
	done
}

echo "=== XATTR_ flag combinations"
touch "$FILE"
print_and_run dumb_setxattr -p "$FILE" -n user.test -v val -c -r
@@ -80,4 +109,17 @@ for i in $(seq 1 $NR); do
	test_xattr_lengths $name_len $val_len
done

echo "=== alternate val size between interesting sizes"
name="user.test"
ITEM=896
HDR=$((8 + 9))
# one full item apart
change_val_sizes $name "$FILE" $(((ITEM * 2) - HDR)) $(((ITEM * 3) - HDR))
# multiple full items apart
change_val_sizes $name "$FILE" $(((ITEM * 6) - HDR)) $(((ITEM * 9) - HDR))
# item boundary fence posts
change_val_sizes $name "$FILE" $(((ITEM * 5) - HDR - 1)) $(((ITEM * 13) - HDR + 1))
# min and max
change_val_sizes $name "$FILE" 1 65535

t_pass
@@ -17,8 +17,10 @@ diff_srch_find()
	local n="$1"

	sync
	scoutfs search-xattrs "$n" -p "$T_M0" > "$T_TMP.srch"
	find_xattrs -d "$T_D0" -m "$T_M0" -n "$n" > "$T_TMP.find"
	scoutfs search-xattrs "$n" -p "$T_M0" > "$T_TMP.srch" || \
		t_fail "search-xattrs failed"
	find_xattrs -d "$T_D0" -m "$T_M0" -n "$n" > "$T_TMP.find" || \
		t_fail "find_xattrs failed"

	diff -u "$T_TMP.srch" "$T_TMP.find"
}
@@ -40,6 +42,31 @@ echo "== remove xattr with files"
rm -f "$T_D0/"{create,update}
diff_srch_find scoutfs.srch.test

echo "== trigger small log merges by rotating single block with unmount"
sv=$(t_server_nr)
i=1
while [ "$i" -lt "8" ]; do
	for nr in $(t_fs_nrs); do
		# not checking, can go over limit by fs_nrs
		((i++))

		if [ $nr == $sv ]; then
			continue;
		fi

		eval path="\$T_D${nr}/single-block-$i"
		touch "$path"
		setfattr -n scoutfs.srch.single-block-logs -v $i "$path"
		t_umount $nr
		t_mount $nr

		((i++))
	done
done
# wait for srch compaction worker delay
sleep 10
rm -rf "$T_D0/single-block-*"

echo "== create entries in current log"
DIR="$T_D0/dir"
NR=$((LOG / 4))
126
tests/tests/totl-xattr-tag.sh
Normal file
@@ -0,0 +1,126 @@
t_require_commands touch rm setfattr scoutfs find_xattrs

read_xattr_totals()
{
        sync
        scoutfs read-xattr-totals -p "$T_M0"
}

echo "== single file"
touch "$T_D0/file-1"
setfattr -n scoutfs.totl.test.1.2.3 -v 1 "$T_D0/file-1" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.4.5.6 -v 1 "$T_D0/file-1" 2>&1 | t_filter_fs
read_xattr_totals

echo "== multiple files add up"
touch "$T_D0/file-2"
setfattr -n scoutfs.totl.test.1.2.3 -v 1 "$T_D0/file-2" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.4.5.6 -v 1 "$T_D0/file-2" 2>&1 | t_filter_fs
read_xattr_totals

echo "== removing xattr updates total"
setfattr -x scoutfs.totl.test.4.5.6 "$T_D0/file-2" 2>&1 | t_filter_fs
read_xattr_totals

echo "== updating xattr updates total"
setfattr -n scoutfs.totl.test.1.2.3 -v 10 "$T_D0/file-2" 2>&1 | t_filter_fs
read_xattr_totals

echo "== removing files update total"
rm -f "$T_D0/file-1"
read_xattr_totals
rm -f "$T_D0/file-2"
read_xattr_totals

echo "== multiple files/names in one transaction"
for a in $(seq 1 10); do
        touch "$T_D0/file-$a"
        setfattr -n scoutfs.totl.test.1.2.3 -v $a "$T_D0/file-$a" 2>&1 | t_filter_fs
done
read_xattr_totals
rm -rf "$T_D0"/file-[0-9]*

echo "== testing invalid names"
touch "$T_D0/invalid"
setfattr -n scoutfs.totl.test... -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test..2.3 -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1..3 -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2. -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1 -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2 -v 10 "$T_D0/invalid" 2>&1 | t_filter_fs

echo "== testing invalid values"
setfattr -n scoutfs.totl.test.1.2.3 -v "+1" "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2.3 -v "10." "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2.3 -v "-" "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2.3 -v "junk10" "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.totl.test.1.2.3 -v "10junk" "$T_D0/invalid" 2>&1 | t_filter_fs
rm -f "$T_D0/invalid"

echo "== larger population that could merge"
NR=5000
TOTS=100
CHECK=100
PER_DIR=1000
PER_FILE=10

declare -A totals counts
LOTS="$T_D0/lots"

for i in $(seq 0 $PER_DIR $NR); do
        p="$LOTS/$((i / PER_DIR))"
        mkdir -p $p
done
for i in $(seq 0 $PER_FILE $NR); do
        p="$LOTS/$((i / PER_DIR))/file-$((i / PER_FILE))"
        touch $p
done

for phase in create update remove; do
        for i in $(seq 0 $NR); do
                p="$LOTS/$((i / PER_DIR))/file-$((i / PER_FILE))"

                t=$((i % TOTS))
                n="scoutfs.totl.test-$i.$t.0.0"

                case $phase in
                create)
                        v="$i"
                        setfattr -n "$n" -v "$v" "$p" >> $T_TMP.sfa 2>&1
                        ((totals[$t]+=$v))
                        ((counts[$t]++))
                        ;;
                update)
                        v=$((i * 3))
                        delta=$((i * 2))
                        setfattr -n "$n" -v "$v" "$p" >> $T_TMP.sfa 2>&1
                        ((totals[$t]+=$delta))
                        ;;
                remove)
                        v=$((i * 3))
                        setfattr -x "$n" "$p" >> $T_TMP.sfa 2>&1
                        ((totals[$t]-=$v))
                        ((counts[$t]--))
                        ;;
                esac

                if [ "$i" -gt 0 -a "$((i % CHECK))" == "0" ]; then
                        echo "checking $phase $i" > $T_TMP.check_arr
                        echo "checking $phase $i" > $T_TMP.check_read

                        ( for k in ${!totals[@]}; do
                                echo "$k.0.0 = ${totals[$k]}, ${counts[$k]}"
                        done ) | grep -v "= 0, 0$" | sort -n >> $T_TMP.check_arr

                        sync
                        read_xattr_totals | sort -n >> $T_TMP.check_read

                        diff -u $T_TMP.check_arr $T_TMP.check_read || \
                                t_fail "totals read didn't match expected arrays"
                fi
        done
done

rm -rf "$LOTS"

t_pass
@@ -60,13 +60,9 @@ EOF

cat << EOF > local.exclude
generic/003 # missing atime update in buffered read
generic/023 # renameat2 not implemented
generic/024 # renameat2 not implemented
generic/025 # renameat2 not implemented
generic/029 # mmap missing
generic/030 # mmap missing
generic/075 # file content mismatch failures (fds, etc)
generic/078 # renameat2 not implemented
generic/080 # mmap missing
generic/103 # enospc causes trans commit failures
generic/105 # needs triage: something about acls
94
utils/fenced/scoutfs-fenced
Executable file
@@ -0,0 +1,94 @@
#!/usr/bin/bash

message_output()
{
        printf "[%s] %s\n" "$(date '+%F %T.%N')" "$@"
}

error_message()
{
        message_output "$@" >&2
}

error_exit()
{
        error_message "$@, exiting"
        exit 1
}

log_message()
{
        message_output "$@"
}

# restart if we catch hup to re-read the config
hup_restart()
{
        log_message "caught SIGHUP, restarting"
        exec "$@"
}
trap hup_restart SIGHUP

# defaults
SCOUTFS_FENCED_CONFIG_FILE=${SCOUTFS_FENCED_CONFIG_FILE:-/etc/scoutfs/scoutfs-fenced.conf}
SCOUTFS_FENCED_DELAY=2
#SCOUTFS_FENCED_RUN
#SCOUTFS_FENCED_RUN_ARGS

test -n "$SCOUTFS_FENCED_CONFIG_FILE" || \
        error_exit "SCOUTFS_FENCED_CONFIG_FILE isn't set"
test -r "$SCOUTFS_FENCED_CONFIG_FILE" || \
        error_exit "SCOUTFS_FENCED_CONFIG_FILE isn't a readable file"

log_message "reading config file $SCOUTFS_FENCED_CONFIG_FILE"

. "$SCOUTFS_FENCED_CONFIG_FILE" || \
        error_exit "error sourcing $SCOUTFS_FENCED_CONFIG_FILE as bash script"

for conf in "${!SCOUTFS_FENCED_@}"; do
        log_message " config var $conf=${!conf}"
done

test -n "$SCOUTFS_FENCED_RUN" || \
        error_exit "SCOUTFS_FENCED_RUN must be set"
test -x "$SCOUTFS_FENCED_RUN" || \
        error_exit "SCOUTFS_FENCED_RUN '$SCOUTFS_FENCED_RUN' isn't executable"

# the validated run command, referenced by the loop below
run="$SCOUTFS_FENCED_RUN"

#
# main loop watching for fence requests across all filesystems
#

while sleep $SCOUTFS_FENCED_DELAY; do
        for fence in /sys/fs/scoutfs/*/fence/*; do
                # catches the unmatched glob when there are no dirs
                if [ ! -d "$fence" ]; then
                        continue
                fi

                # skip requests that have been handled
                if [ $(cat "$fence/fenced") == 1 -o $(cat "$fence/error") == 1 ]; then
                        continue
                fi

                srv=$(basename $(dirname $(dirname $fence)))
                rid="$(cat $fence/rid)"
                ip="$(cat $fence/ipv4_addr)"
                reason="$(cat $fence/reason)"

                log_message "server $srv fencing rid $rid at IP $ip for $reason"

                # export _REQ_ vars for the run command to use
                export SCOUTFS_FENCED_REQ_RID="$rid"
                export SCOUTFS_FENCED_REQ_IP="$ip"

                $run $SCOUTFS_FENCED_RUN_ARGS
                rc=$?
                if [ "$rc" != 0 ]; then
                        log_message "server $srv fencing rid $rid saw error status $rc from $run"
                        echo 1 > "$fence/error"
                        continue
                fi

                echo 1 > "$fence/fenced"
        done
done
6
utils/fenced/scoutfs-fenced.conf.example
Normal file
@@ -0,0 +1,6 @@
# delay, in seconds, between each check for pending fence requests.
SCOUTFS_FENCED_DELAY=1
# path to executable to run to service fence request
#SCOUTFS_FENCED_RUN=
# arguments to pass to binary
SCOUTFS_FENCED_RUN_ARGS=""
11
utils/fenced/scoutfs-fenced.service
Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=ScoutFS fenced

[Service]
Restart=on-failure
RestartSec=5s
StartLimitBurst=5
ExecStart=/usr/libexec/scoutfs-fenced/scoutfs-fenced

[Install]
WantedBy=default.target
66
utils/man/scoutfs-fenced.8
Normal file
@@ -0,0 +1,66 @@
.TH scoutfs-fenced 8
.SH NAME
scoutfs-fenced \- scoutfs fence request monitoring and dispatch daemon
.SH DESCRIPTION
The
.B scoutfs-fenced
daemon runs on hosts with mounts that are configured as quorum members
and could create fence requests. It watches the sysfs directories of
mounted scoutfs volumes for the directories that store requests
to fence a mount.

.SH ENVIRONMENT
scoutfs-fenced reads the
.I SCOUTFS_FENCED_CONFIG_FILE
environment variable for the path to the file that contains its
configuration. The file must be readable, is sourced as a bash
script, and is expected to set the following configuration variables.

.SH CONFIGURATION

.TP
.B SCOUTFS_FENCED_DELAY
The number of seconds to wait between checks for fence request
directories in the sysfs directories of all mounts on the host.

.TP
.B SCOUTFS_FENCED_RUN
The path to the command to execute for each fence request. The file at
the path must be executable.

.TP
.B SCOUTFS_FENCED_RUN_ARGS
The arguments that are unconditionally passed through to the run
command.

.SH DAEMONIZING AND LOGGING

scoutfs-fenced runs in the foreground and writes to stderr and stdout.
Disconnecting it from parents and redirecting its output are the
responsibility of the host environment.

.SH RUN COMMAND INTERFACE

scoutfs-fenced sets environment variables for the run command with
information about the mount that must be fenced:

.TP
.B SCOUTFS_FENCED_REQ_RID
The RID of the mount to be fenced.
.TP
.B SCOUTFS_FENCED_REQ_IP
The dotted quad IPv4 address of the last connection from the mount.

.RE
The return status of the run command indicates whether the mount was
fenced. If the mount was successfully fenced then the command
should return a 0 success status. If the run command returns a non-zero
failure status then the request will be marked as errored and the server
will shut down. The next server that starts will create another fence
request for the mount.
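
A minimal run command might look like the following sketch; the isolation step (here a hypothetical ipmitool power-off of a BMC whose name is derived from the peer address) is entirely site-specific:

    #!/usr/bin/bash
    # hypothetical SCOUTFS_FENCED_RUN script: power off the host that owns
    # the mount, identified by the _REQ_ variables exported by the daemon
    echo "fencing rid $SCOUTFS_FENCED_REQ_RID at $SCOUTFS_FENCED_REQ_IP"
    # the exit status of this last command tells the daemon whether to
    # mark the request fenced (zero) or errored (non-zero)
    ipmitool -H "bmc-$SCOUTFS_FENCED_REQ_IP" -U admin -P secret power off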

.SH SEE ALSO
.BR scoutfs (5)

.SH AUTHORS
Zach Brown <zab@versity.com>
@@ -1,6 +1,6 @@
.TH scoutfs 5
.SH NAME
scoutfs \- overview and mount options for the scoutfs filesystem
scoutfs \- high level overview of the scoutfs filesystem
.SH DESCRIPTION
A scoutfs filesystem is stored on two block devices. Multiple mounts of
the filesystem are supported between hosts that share access to the

@@ -34,7 +34,251 @@ the server for the filesystem if it is elected leader.
The assigned number must match one of the slots defined with \-Q options
when the filesystem was created with mkfs. If the number assigned
doesn't match a number created during mkfs then the mount will fail.
.SH FURTHER READING
.SH VOLUME OPTIONS
Volume options are persistent options which are stored in the super
block in the metadata device and which apply to all mounts of the volume.
.sp
Volume options may be initially specified as the volume is created,
as described in the mkfs command in
.BR scoutfs (8).
.sp
Volume options may be changed at runtime by writing to files in sysfs
while the volume is mounted. Volume options are found in the
volume_options/ directory with a file for each option. Reading the
file provides the current setting of the option and an empty string
is returned if the option is not set. To set the option, write
the new value of the option to the file. To clear the option, write
a blank line with a newline to the file. The write syscall will
return an error if the set operation fails and a message will be written
to the console.
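
As a concrete sketch of that sysfs interface (the f.FSID.r.RID directory name below is a hypothetical placeholder for a real mount's sysfs directory, and data_alloc_zone_blocks is the option described next):

    # read, set, and clear a volume option through sysfs
    opts=/sys/fs/scoutfs/f.3fab.r.42/volume_options
    cat "$opts/data_alloc_zone_blocks"            # empty string: option not set
    echo 262144 > "$opts/data_alloc_zone_blocks"  # set the option
    cat "$opts/data_alloc_zone_blocks"            # reads back the current setting
    echo > "$opts/data_alloc_zone_blocks"         # a blank line clears the option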
.sp
The following volume options are supported:
.TP
.B data_alloc_zone_blocks=<zone size in 4KiB blocks>
When the data_alloc_zone_blocks option is set the data device is
logically divided into zones of equal length as specified by the value
of the option. The size of the zones must be greater than a minimum
allocation pool size, large enough to result in no more than 1024 zones,
and not more than the total number of blocks in the data device.
.sp
When set, the server will try to provide each mount with free data
extents that don't share a zone with other mounts. When a mount has free
extents in a given zone the server will try to find more free extents
in that zone. When the mount is not in a zone, or its zone has no more
free extents, the server will try to find free extents in a zone that
no other mount currently occupies. The result is to try to produce
write streams where only one mount is writing into each zone.
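
The 1024-zone limit above implies a lower bound on the option value for a given device. A quick shell sketch, assuming a hypothetical 100 TiB data device:

    # smallest zone size that keeps a 100 TiB data device within 1024 zones
    total_blocks=$(( 100 * (1 << 40) / 4096 ))            # device size in 4KiB blocks
    min_zone_blocks=$(( (total_blocks + 1023) / 1024 ))   # ceil(total / 1024)
    echo "$total_blocks blocks; zones must be at least $min_zone_blocks blocks"

The chosen value must also clear the minimum allocation pool size and fit within the device, per the constraints above.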
.SH FENCING
.B scoutfs
mounts coordinate exclusive access to shared resources through
communication with the mount that was elected leader.
A mount can malfunction and stop participating, at which point it needs
to be safely isolated ("fenced off") from shared resources before other mounts can
have their turn at exclusive access.
.sp
Only the elected leader can fence mounts. As the leader decides that a
mount must be fenced, typically by timeouts expiring without
communication from the mount, it creates a fence request. Fence
requests are visible as directories in the leader mount's sysfs
directory. The fence request directory is named for the RID of the
mount being fenced. The directory contains the following files:

.RS
.TP
.B elapsed_secs
Reading this file gives the number of seconds that have passed since
this fence request was created.
.TP
.B error
This file contains 0 when the fence request is created. Userspace
fencing agents write 1 into this file if they are unable to fence the
mount. The volume cannot make progress until the mount is fenced so
this will cause the server to stop and another mount will be elected
leader.
.TP
.B fenced
This file contains 0 when the fence request is created. Userspace
fencing agents write 1 into this file once the mount has been fenced.
.TP
.B ipv4_addr
This file contains the dotted quad IPv4 peer address of the last
connected socket from the mount. Userspace fencing agents can use this
to find the host that contains the mount.
.TP
.B reason
This file contains a text string that indicates the reason that the
mount is being fenced:

.B client_recovery
- During startup the server found persistent items recording the presence
of a mount that didn't reconnect to the server in time.
.sp
.B client_reconnect
- A mount disconnected from the server and didn't reconnect in time.
.sp
.B quorum_block_leader
- As a leader was elected it read persistent blocks that indicated that
a previous leader had not shut down and cleared their quorum block.
.TP
.B rid
This file contains the hex string of the RID of the mount to be fenced.
.RE

The request directories enable userspace processes to gather the
information to find the host with the mount to fence, isolate the mount
by whatever means are appropriate (e.g. cut off network and storage
communication, force unmount the mount, isolate storage fabric ports,
reboot the host), and write to the
.I fenced
file.
.sp
Once the
.I fenced
file is written to, the server reclaims the resources
associated with the fenced mount and resumes normal operations.
.sp
If the
.I error
file is written to then the server cannot make forward progress and
shuts down. The request can similarly enter an errored state if enough
time passes before userspace completes the request.
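
The scoutfs-fenced daemon packaged in this change automates this loop; handled by hand, one request amounts to roughly the following sketch (the fence directory path and RID are illustrative):

    # manually service one fence request directory
    fence=/sys/fs/scoutfs/f.3fab.r.42/fence/f0a1b2c3d4e5f607   # hypothetical path
    echo "rid $(cat $fence/rid) at $(cat $fence/ipv4_addr): $(cat $fence/reason)"
    # ... isolate the mount's host by whatever means are appropriate ...
    echo 1 > "$fence/fenced"    # on success; write 1 to error instead on failure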

.SH EXTENDED ATTRIBUTE TAGS

.B scoutfs
adds the
.IB scoutfs.
extended attribute namespace which uses a system of tags to extend the
functionality of extended attributes. Immediately following the
scoutfs. prefix are a series of tag words separated by dots.
Any text starting after the last recognized tag is considered the xattr
name and is not parsed.
.sp
Tags may be combined in any order. Specifying a tag more than once
will return an error. There is no explicit boundary between the end of
tags and the start of the name so unknown or incorrect tags will be
successfully parsed as part of the name of the xattr. Tagged xattrs can
only be created, updated, or removed with the CAP_SYS_ADMIN capability.

The following tags are currently supported:

.RS
.TP
.B .hide.
Attributes with the .hide. tag are not visible to the
.BR listxattr (2)
system call. They will instead be included in the output of the
.IB LISTXATTR_HIDDEN
ioctl. This is meant to be used by archival management agents to store
metadata that is bound to a specific volume and should not be
transferred with the file by tools that read extended attributes, like
.BR tar (1).
.TP
.B .srch.
Attributes with the .srch. tag are indexed so that they can be
found by the
.IB SEARCH_XATTRS
ioctl. The search ioctl takes an extended attribute name and returns
the inode numbers of all the inodes which contain an extended attribute
with that name. The indexing structures behind .srch. tags are designed
to efficiently handle a large number of .srch. attributes per file with
no limits on the number of indexed files.
.TP
.B .totl.
Attributes with the .totl. tag are used to efficiently maintain counts
across all files in the system. The attribute's name must end in three
64bit values separated by dots that specify the global total that the
extended attribute will contribute to. The value of the extended
attribute is a string representation of the 64bit quantity which will be
added to the total. As attributes are added, updated, or removed (and
particularly as a file is finally deleted), the corresponding global
total is also updated by the file system. All the totals, with their
name, total value, and a count of contributing attributes, can be read
with the
.IB READ_XATTR_TOTALS
ioctl.
.RE
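
As a rough usage sketch, mirroring the tests added in this change (the mount path is a placeholder):

    # tag usage from a mount at /mnt/scoutfs
    setfattr -n scoutfs.srch.backup-id -v 1 /mnt/scoutfs/file     # indexed for SEARCH_XATTRS
    setfattr -n scoutfs.totl.proj.1.2.3 -v 100 /mnt/scoutfs/file  # adds 100 to total 1.2.3
    scoutfs search-xattrs scoutfs.srch.backup-id -p /mnt/scoutfs  # inodes with that name
    scoutfs read-xattr-totals -p /mnt/scoutfs                     # totals and counts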

.SH FORMAT VERSION
The format version defines the layout and use of structures stored on
devices and passed over the network. The version is incremented for
every change in structures that is not backwards compatible with
previous versions. A single version implies all changes; individual
changes can't be selectively adopted.
.sp
As a new file system is created the format version is stored in both of
the super blocks written to the metadata and data devices. By default
the greatest supported version is written, while an older supported
version may be specified.
.sp
During mount the kernel module verifies that the format versions stored
in both of the super blocks match and are supported. That version
defines the set of features and behavior of all the mounts using the
file system, including the network protocol that is communicated over
the wire.
.sp
Any combination of software release versions that support the current
format version of the file system can safely be used concurrently. This
allows for rolling software updates of multiple mounts using a shared
file system.
.sp
To use new incompatible features added in newer format versions the super blocks must
be updated. This can currently only be safely performed on a
completely and cleanly unmounted file system. The
.BR scoutfs (8)
.I change-format-version
command can be used with the
.I --offline
option to write a newer supported version into the super blocks. It
will fail if it sees any indication of unresolved mounts that may be
using the devices: either active quorum members working with their
quorum blocks or persistent records of mounted clients that haven't been
resolved. Like creating a new file system, there is no protection
against multiple invocations of the change command corrupting the
system. Once the version is updated older software can no longer use
the file system, so this change should be performed with care. Once the
newer format version is successfully written it can be mounted and newer
features can be used.
.sp
Each layer of the system can show its supported format versions:
.RS
.TP
.B Userspace utilities
.B scoutfs --help
includes the range of supported format versions for a given release
of the userspace utilities.
.TP
.B Kernel module
.I modinfo MODULE
shows the range of supported versions for a kernel module file in the
.I scoutfs_format_version_min
and
.I scoutfs_format_version_max
fields.
.TP
.B Inserted module
The supported version range of an inserted module can be found in
.I .note.scoutfs_format_version_min
and
.I .note.scoutfs_format_version_max
notes files in the sysfs notes directory for the inserted module,
typically
.I /sys/module/scoutfs/notes/
.TP
.B Metadata and data devices
.I scoutfs print DEVICE
shows the
.I fmt_vers
field in the initial output of the super block on the device.
.TP
.B Mounted filesystem
The version that a mount is using is shown in the
.I format_version
file in the mount's sysfs directory, typically
.I /sys/fs/scoutfs/f.FSID.r.RID/
.RE
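
Gathering those layers into one pre-upgrade check might look like this sketch (module, device, and mount paths are illustrative; the sysfs notes files hold binary-encoded ELF notes, hence hexdump):

    scoutfs --help | grep -i version                      # userspace utilities
    modinfo scoutfs.ko | grep scoutfs_format_version      # module file min/max
    hexdump -C /sys/module/scoutfs/notes/.note.scoutfs_format_version_max
    scoutfs print /dev/scoutfs_meta | grep fmt_vers       # super block on device
    cat /sys/fs/scoutfs/f.3fab.r.42/format_version        # version a mount is using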

.SH CORRUPTION DETECTION
A
.B scoutfs
filesystem can detect corruption at runtime. A catalog of kernel log

@@ -14,6 +14,68 @@ option will, when the option is omitted, fall back to using the value of the
environment variable. If that variable is also absent the current working
directory will be used.

.TP
.BI "change-format-version [-V|--format-version VERS] [-F|--offline META-DEVICE DATA-DEVICE]"
.sp
Change the format version of an existing file system. The maximum
supported version is used by default. A specific version in the range
can be specified. The range of supported versions is shown in the
output of --help.
.RS 1.0i
.PD 0
.TP
.sp
.B "-F, --offline META-DEVICE DATA-DEVICE"
Change the format version by writing directly to the metadata and data
devices. Like mkfs, this writes directly to the devices without
protection and must only be used on completely unmounted devices. The
command will fail if it sees evidence of active quorum use of the device
or of previously connected clients which haven't been reclaimed. The
only way to avoid these checks is to fully mount and cleanly unmount the
file system.
.sp
This is not an atomic operation because it writes to blocks on two
devices. Write failure can result in the versions becoming out of sync
which will prevent the system from mounting. To recover, the error must
be resolved so the command can be repeated and successfully write to
the super blocks on both devices.
.RE
.PD
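
Under those rules an offline version bump might look like the following sketch (device paths are placeholders, and every host must have cleanly unmounted first):

    scoutfs --help | grep -i version      # confirm the target version is supported
    scoutfs change-format-version --offline /dev/scoutfs_meta /dev/scoutfs_data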

.TP
.BI "change-quorum-config {-Q|--quorum-slot} NR,ADDR,PORT [-F|--offline META-DEVICE DATA-DEVICE]"
.sp
Change the quorum configuration for an existing file system. The new
configuration completely replaces the old configuration. Any slots
from the old configuration that should be retained must be described
with arguments in the new configuration.
.sp
Currently the configuration may only be changed offline.
.sp
.RS 1.0i
.PD 0
.TP
.B "-Q, --quorum-slot NR,ADDR,PORT"
The quorum configuration is built by specifying configured slots with
multiple arguments as described in the
.B mkfs
command.
.TP
.B "-F, --offline META-DEVICE"
Perform the change offline by updating the super block in the metadata
device. The command will read the super block and refuse to make the
change if it sees any evidence that the metadata device is currently in
use. The file system must be successfully unmounted, after possibly
recovering any previously unresolved mounts, for the change to be
successful. After the change succeeds the newly configured slots can
be used by mounts.
.sp
The offline change directly reads from and writes to the device and does
not protect against concurrent use of the device. It must be carefully
run when the file system will not be mounted.
.RE
.PD
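
For example, rewriting a three-slot configuration offline might look like this sketch (the addresses and metadata device path are placeholders; per the option description above, the offline change writes only to the metadata device):

    # the new slot set completely replaces the old one, so slots that
    # should survive the change are respecified here
    scoutfs change-quorum-config \
        -Q 0,10.0.0.1,12345 \
        -Q 1,10.0.0.2,12345 \
        -Q 2,10.0.0.3,12345 \
        --offline /dev/scoutfs_meta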

.TP
.BI "df [-h|--human-readable] [-p|--path PATH]"
.sp

@@ -32,10 +94,18 @@ A path within a ScoutFS filesystem.
.PD

.TP
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-z|--data-alloc-zone-blocks BLOCKS] [-f|--force] [-A|--allow-small-size] [-V|--format-version VERS]"
.sp
Initialize a new ScoutFS filesystem on the target devices. Since ScoutFS uses
separate block devices for its metadata and data storage, two are required.
The internal structures and nature of metadata and data transactions
lead to minimum viable device sizes.
.B mkfs
will check both devices and fail with an error if either is under the
minimum size. If
.B --allow-small-size
is given then sizes under the minimum size will be
allowed after printing an informational warning.
.sp
If
.B --force
@@ -81,12 +151,84 @@ kibibytes, mebibytes, etc.
.B "-d, --max-data-size SIZE"
Same as previous, but for limiting the size of the data device.
.TP
.B "-A, --allow-small-size"
Allows use of specified device sizes less than the minimum. This can
result in bad behaviour and is only intended for testing.
.TP
.B "-z, --data-alloc-zone-blocks BLOCKS"
Set the data_alloc_zone_blocks volume option, as described in
.BR scoutfs (5).
.TP
.B "-f, --force"
Ignore presence of existing data on the data and metadata devices.
.TP
.B "-V, --format-version VERS"
Specify the format version to use in the newly created file system.
The range of supported versions is visible in the output of
.BR scoutfs (8)
.I --help
.
.RE
.PD
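
A full invocation using these options might look like the following sketch (the device paths and quorum address are placeholders):

    # one quorum slot, a data allocation zone option, and default sizes
    scoutfs mkfs /dev/scoutfs_meta /dev/scoutfs_data \
        -Q 0,10.0.0.1,12345 \
        -z 262144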

.TP
.BI "resize-devices [-p|--path PATH] [-m|--meta-size SIZE] [-d|--data-size SIZE]"
.sp
Resize the metadata or data devices of a mounted ScoutFS filesystem.
.sp
ScoutFS metadata has free extent records and fields in the super block
that reflect the size of the devices in use. This command sends a
request to the server to change the size of the device that can be used
by updating free extents and setting the super block fields.
.sp
The specified sizes are in bytes and are translated into block counts.
If the specified sizes are not a multiple of the metadata or data block
sizes then a message is output and the resized size is truncated down to
the next whole block. Specifying either a size of 0 or the current
device size makes no change. The current size of the devices can be
seen, in units of their respective block sizes, in the total_meta_blocks
and total_data_blocks fields returned by the scoutfs statfs command (via
the statfs_more ioctl).
.sp
Shrinking is not supported. Specifying a smaller size for either device
will return an error and neither device will be resized.
.sp
Specifying a larger size will expand the usable size of the device.
Free space records are added for the expanded region and
can be used once the resizing transaction is complete.
.sp
The resizing action is performed in a transaction on the server. This
command will hang until a server is elected and running and can service
the request. The server serializes any concurrent requests to resize.
.sp
The new sizes must fit within the current sizes of the mounted devices.
Presumably this command is being performed as part of a larger
coordinated resize of the underlying devices. The device must be
expanded before ScoutFS can use the larger device, and ScoutFS must stop
using a region to shrink before it could be removed from the device
(which is not currently supported).
.sp
The resize will be committed by the server before the response is sent
to the client. The system can be using the new device size before the
result is communicated through the client and this command completes.
The client could crash and the server could still have performed the
resize.
.RS 1.0i
.PD 0
.TP
.sp
.B "-p, --path PATH"
A path in the mounted ScoutFS filesystem which will have its devices
resized.
.TP
.B "-m, --meta-size SIZE"
.B "-d, --data-size SIZE"
The new size of the metadata or data device to use, in bytes. Size is given as
an integer followed by a unit suffix: "K", "M", "G", "T", "P", to denote
kibibytes, mebibytes, etc.
.RE
.PD
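
Tying that together, growing a volume might proceed roughly as follows (the LVM device names are placeholders, and the statfs invocation assumes the same -p path flag used by the other subcommands):

    # grow the underlying block devices first, then ask the server to use them
    lvextend -L 2T /dev/vg0/scoutfs_meta
    lvextend -L 200T /dev/vg0/scoutfs_data
    scoutfs resize-devices -p /mnt/scoutfs -m 2T -d 200T
    scoutfs statfs -p /mnt/scoutfs | grep 'total_.*_blocks'   # confirm new sizes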

.BI "stat FILE [-s|--single-field FIELD-NAME]"
.sp
Display ScoutFS-specific metadata fields for the given file.

@@ -475,6 +617,33 @@ command is used first.
.RE
.PD

.TP
.BI "get-allocated-inos [-i|--ino INO] [-s|--single] [-p|--path PATH]"
.sp
This debugging command prints allocated inode numbers. It only prints
inodes found in the group that contains the starting inode. The printed
inode numbers aren't necessarily reachable; they could be anywhere in
the process from being unlinked to finally deleted at the time their
items were found.
.RS 1.0i
.PD 0
.TP
.sp
.B "-i, --ino INO"
The first 64bit inode number which could be printed.
.TP
.B "-s, --single"
Only print the single starting inode when it is allocated; all other allocated
inode numbers will be ignored.
.TP
.B "-p, --path PATH"
A path within a ScoutFS filesystem.
.RE
.PD
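
For instance (the mount path and inode number are illustrative):

    scoutfs get-allocated-inos -i 4096 -p /mnt/scoutfs       # group containing ino 4096
    scoutfs get-allocated-inos -i 4096 -s -p /mnt/scoutfs    # only ino 4096 itself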

.TP

.SH SEE ALSO
.BR scoutfs (5),
.BR xattr (7),
@@ -54,12 +54,18 @@ cp man/*.8.gz $RPM_BUILD_ROOT%{_mandir}/man8/.
install -m 755 -D src/scoutfs $RPM_BUILD_ROOT%{_sbindir}/scoutfs
install -m 644 -D src/ioctl.h $RPM_BUILD_ROOT%{_includedir}/scoutfs/ioctl.h
install -m 644 -D src/format.h $RPM_BUILD_ROOT%{_includedir}/scoutfs/format.h
install -m 755 -D fenced/scoutfs-fenced $RPM_BUILD_ROOT%{_libexecdir}/scoutfs-fenced/scoutfs-fenced
install -m 644 -D fenced/scoutfs-fenced.service $RPM_BUILD_ROOT%{_unitdir}/scoutfs-fenced.service
install -m 644 -D fenced/scoutfs-fenced.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-fenced.conf.example

%files
%defattr(644,root,root,755)
%{_mandir}/man*/scoutfs*.gz
%{_unitdir}/scoutfs-fenced.service
%{_sysconfdir}/scoutfs
%defattr(755,root,root,755)
%{_sbindir}/scoutfs
%{_libexecdir}/scoutfs-fenced

%files -n scoutfs-devel
%defattr(644,root,root,755)
@@ -40,7 +40,7 @@ static void *alloc_val(struct scoutfs_btree_block *bt, int len)
{
        le16_add_cpu(&bt->mid_free_len, -len);
        le16_add_cpu(&bt->total_item_bytes, len);
        return (void *)bt + le16_to_cpu(bt->mid_free_len);
        return (void *)&bt->items[le16_to_cpu(bt->nr_items)] + le16_to_cpu(bt->mid_free_len);
}

/*
@@ -75,6 +75,9 @@ void btree_append_item(struct scoutfs_btree_block *bt,
        le16_add_cpu(&bt->total_item_bytes, sizeof(struct scoutfs_btree_item));

        item->key = *key;
        item->seq = cpu_to_le64(1);
        item->flags = 0;

        leaf_item_hash_insert(bt, &item->key,
                              cpu_to_le16((void *)item - (void *)bt));
        if (val_len == 0)
Some files were not shown because too many files have changed in this diff.