Mirror of https://github.com/versity/scoutfs.git
Synced 2026-01-26 05:12:03 +00:00

Compare commits: v1.1 ... zab/increa
44 Commits
| SHA1 |
|---|
| 48f1305a8a |
| e423d42106 |
| 82d2be2e4a |
| 4102b760d0 |
| 65654ee7c0 |
| b2d6ceeb9c |
| d8231016f8 |
| 3c2b329675 |
| 96ad8dd510 |
| 44f38a31ec |
| fb2ff753ad |
| bb3db7e272 |
| c94b072925 |
| 26ae9c6e04 |
| c8d7221ec5 |
| 7fd03dc311 |
| 4e8a088cc5 |
| 9c751c1197 |
| 875583b7ef |
| 38e5aa77c4 |
| 57a1d75e52 |
| 51d19d797f |
| 029a684c25 |
| f2679d9598 |
| bddca171ee |
| 18171b8543 |
| d846eec5e8 |
| e2c90339c5 |
| 4a0b14a4f2 |
| 90518a0fbd |
| cd23cc61ca |
| a67ea30bb7 |
| f3b7c683f0 |
| 8decc54467 |
| 5adcf7677f |
| 07f03d499f |
| c5068efef0 |
| 66678dc63b |
| b2834d3c28 |
| cff50bec6b |
| 4d6350b3b0 |
| 48966b42bb |
| 97cb8ad50d |
| ae08a797ae |
@@ -2,9 +2,54 @@ Versity ScoutFS Release Notes
 =============================

 ---

-v1.2-rc
+v1.3
 \
-*TBD*
+*Apr 7, 2022*
+
+* **Fix rare server instability under heavy load**
+  \
+  Fixed a case of server instability under heavy load due to concurrent
+  work fully exhausting metadata block allocation pools reserved for a
+  single server transaction. This would cause a brief interruption as the
+  server shut down and the next server started up and made progress as
+  pending work was retried.
+
+* **Fix slow fencing preventing server startup**
+  \
+  If a server had to process many fence requests with a slow fencing
+  mechanism it could be interrupted before it finished. The server
+  now makes sure heartbeat messages are sent while it is making progress
+  on fencing requests so that other quorum members don't interrupt the
+  process.
+
+* **Performance improvement in getxattr and setxattr**
+  \
+  Kernel allocation patterns in the getxattr and setxattr
+  implementations were causing significant contention between CPUs. Their
+  allocation strategy was changed so that concurrent tasks can call these
+  xattr methods without degrading performance.
+
+---
+
+v1.2
+\
+*Mar 14, 2022*
+
+* **Fix deadlock between fallocate() and read() system calls**
+  \
+  Fixed a lock inversion that could cause two tasks to deadlock if they
+  performed fallocate() and read() on a file at the same time. The
+  deadlock was uninterruptible so the machine needed to be rebooted. This
+  was relatively rare as fallocate() is usually used to prepare files
+  before they're used.
+
+* **Fix instability from heavy file deletion workloads**
+  \
+  Fixed rare circumstances under which background file deletion cleanup
+  tasks could try to delete a file while it was being deleted by another
+  task. Heavy load across multiple nodes, either many files being deleted
+  or large files being deleted, increased the chances of this happening.
+  Heavy staging could cause this problem because staging can create many
+  internal temporary files that need to be deleted.

 ---

 v1.1
@@ -1318,6 +1318,17 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
 	return lo;
 }

+void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space)
+{
+	unsigned int seq;
+
+	do {
+		seq = read_seqbegin(&alloc->seqlock);
+		*avail_total = le32_to_cpu(alloc->avail.first_nr);
+		*freed_space = list_block_space(alloc->freed.first_nr);
+	} while (read_seqretry(&alloc->seqlock, seq));
+}
+
 bool scoutfs_alloc_test_flag(struct super_block *sb,
 			     struct scoutfs_alloc *alloc, u32 flag)
 {
@@ -158,6 +158,7 @@ int scoutfs_alloc_splice_list(struct super_block *sb,

 bool scoutfs_alloc_meta_low(struct super_block *sb,
 			    struct scoutfs_alloc *alloc, u32 nr);
+void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space);
 bool scoutfs_alloc_test_flag(struct super_block *sb,
 			     struct scoutfs_alloc *alloc, u32 flag);
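The reader loop in scoutfs_alloc_meta_remaining() above is the standard seqlock snapshot pattern: the pair of counters is only accepted if the sequence count is unchanged across the copy, so both values always come from one consistent update. A minimal user-space sketch of the same pattern, with hypothetical names and a trivial single-threaded driver (a real seqlock also needs the kernel's fencing, elided here):

```c
#include <stdatomic.h>
#include <stdio.h>

struct snap {
	atomic_uint seq;	/* even: stable, odd: write in progress */
	unsigned int avail;
	unsigned int freed;
};

static void write_pair(struct snap *s, unsigned int avail, unsigned int freed)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* now odd */
	s->avail = avail;
	s->freed = freed;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even again */
}

static void read_pair(struct snap *s, unsigned int *avail, unsigned int *freed)
{
	unsigned int seq;

	do {
		/* spin past in-progress writes (odd counts) */
		while ((seq = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
			;
		*avail = s->avail;
		*freed = s->freed;
		/* retry if a write slipped in while we copied */
	} while (atomic_load_explicit(&s->seq, memory_order_acquire) != seq);
}

int main(void)
{
	struct snap s = { .avail = 10, .freed = 3 };
	unsigned int a, f;

	write_pair(&s, 8, 5);
	read_pair(&s, &a, &f);
	printf("avail %u freed %u\n", a, f);
	return 0;
}
```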
@@ -2449,7 +2449,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 			      struct scoutfs_alloc *alloc,
 			      struct scoutfs_block_writer *wri,
 			      struct scoutfs_key *key,
-			      struct scoutfs_btree_root *root, int alloc_low)
+			      struct scoutfs_btree_root *root, int free_budget)
 {
 	u64 blknos[SCOUTFS_BTREE_MAX_HEIGHT];
 	struct scoutfs_block *bl = NULL;

@@ -2459,11 +2459,15 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 	struct scoutfs_avl_node *node;
 	struct scoutfs_avl_node *next;
 	struct scoutfs_key par_next;
+	int nr_freed = 0;
 	int nr_par;
 	int level;
 	int ret;
 	int i;

+	if (WARN_ON_ONCE(free_budget <= 0))
+		return -EINVAL;
+
 	if (WARN_ON_ONCE(root->height > ARRAY_SIZE(blknos)))
 		return -EIO; /* XXX corruption */

@@ -2538,8 +2542,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 	while (node) {

 		/* make sure we can always free parents after leaves */
-		if (scoutfs_alloc_meta_low(sb, alloc,
-					   alloc_low + nr_par + 1)) {
+		if ((nr_freed + 1 + nr_par) > free_budget) {
 			ret = 0;
 			goto out;
 		}

@@ -2553,6 +2556,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 				       le64_to_cpu(ref.blkno));
 		if (ret < 0)
 			goto out;
+		nr_freed++;

 		node = scoutfs_avl_next(&bt->item_root, node);
 		if (node) {

@@ -2568,6 +2572,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 			   blknos[i]);
 		ret = scoutfs_free_meta(sb, alloc, wri, blknos[i]);
 		BUG_ON(ret); /* checked meta low, freed should fit */
+		nr_freed++;
 	}

 	/* restart walk past the subtree we just freed */

@@ -125,7 +125,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 			      struct scoutfs_alloc *alloc,
 			      struct scoutfs_block_writer *wri,
 			      struct scoutfs_key *key,
-			      struct scoutfs_btree_root *root, int alloc_low);
+			      struct scoutfs_btree_root *root, int free_budget);

 void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);
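Together these hunks change scoutfs_btree_free_blocks() from bailing out when the shared allocator dips below a floor (the old alloc_low) to counting what it frees against an explicit per-call budget (free_budget). That's the mechanism behind the v1.3 note about concurrent work exhausting pools reserved for a single server transaction. A hypothetical caller sketch, assuming only the functions shown in this diff; the budget split is an illustration, not the server's actual policy:

```c
/*
 * Sketch: size a freeing pass from what's left in this transaction's
 * allocator pools, rather than letting the pass run until a global
 * low-water check fires underneath concurrent work.
 */
static int free_some_btree_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
				  struct scoutfs_block_writer *wri, struct scoutfs_key *key,
				  struct scoutfs_btree_root *root)
{
	u32 avail_total;
	u32 freed_space;
	int budget;

	scoutfs_alloc_meta_remaining(alloc, &avail_total, &freed_space);

	/* don't queue more frees than the freed list can absorb, and
	 * leave avail blocks for the btree blocks we'll dirty */
	budget = min(freed_space, avail_total / 2);
	if (budget <= 0)
		return 0;	/* commit and retry in the next transaction */

	return scoutfs_btree_free_blocks(sb, alloc, wri, key, root, budget);
}
```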
@@ -477,12 +477,15 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
 	struct super_block *sb = client->sb;
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 	struct scoutfs_super_block *super = &sbi->super;
-	struct mount_options *opts = &sbi->opts;
-	const bool am_quorum = opts->quorum_slot_nr >= 0;
+	struct scoutfs_mount_options opts;
 	struct scoutfs_net_greeting greet;
 	struct sockaddr_in sin;
+	bool am_quorum;
 	int ret;

+	scoutfs_options_read(sb, &opts);
+	am_quorum = opts.quorum_slot_nr >= 0;
+
 	/* can unmount once server farewell handling removes our item */
 	if (client->sending_farewell &&
 	    lookup_mounted_client_item(sb, sbi->rid) == 0) {
@@ -152,11 +152,12 @@
 	EXPAND_COUNTER(net_recv_messages)				\
 	EXPAND_COUNTER(net_unknown_request)				\
 	EXPAND_COUNTER(orphan_scan)					\
+	EXPAND_COUNTER(orphan_scan_attempts)				\
 	EXPAND_COUNTER(orphan_scan_cached)				\
 	EXPAND_COUNTER(orphan_scan_error)				\
 	EXPAND_COUNTER(orphan_scan_item)				\
+	EXPAND_COUNTER(orphan_scan_omap_set)				\
-	EXPAND_COUNTER(orphan_scan_read)				\
 	EXPAND_COUNTER(quorum_candidate_server_stopping)		\
 	EXPAND_COUNTER(quorum_elected)					\
 	EXPAND_COUNTER(quorum_fence_error)				\
 	EXPAND_COUNTER(quorum_fence_leader)				\
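The counters above are entries in an x-macro list: each EXPAND_COUNTER(name) line is expanded once per use site, so adding orphan_scan_attempts here creates the field, its name string, and anything else the macro is instantiated for. A self-contained user-space sketch of the pattern with a stand-in list:

```c
#include <stdio.h>

/* One list of names, expanded twice below; the list is a stand-in. */
#define DEMO_COUNTERS(EXPAND)		\
	EXPAND(orphan_scan)		\
	EXPAND(orphan_scan_attempts)	\
	EXPAND(orphan_scan_error)

struct demo_counters {
/* expansion #1: one struct field per counter name */
#define EXPAND_FIELD(name) unsigned long name;
	DEMO_COUNTERS(EXPAND_FIELD)
#undef EXPAND_FIELD
};

int main(void)
{
	struct demo_counters c = { .orphan_scan_attempts = 3 };

/* expansion #2: one printable name per counter */
#define EXPAND_PRINT(name) printf(#name " = %lu\n", c.name);
	DEMO_COUNTERS(EXPAND_PRINT)
#undef EXPAND_PRINT
	return 0;
}
```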
@@ -983,9 +983,6 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	u64 last;
 	s64 ret;

-	mutex_lock(&inode->i_mutex);
-	down_write(&si->extent_sem);
-
 	/* XXX support more flags */
 	if (mode & ~(FALLOC_FL_KEEP_SIZE)) {
 		ret = -EOPNOTSUPP;

@@ -1003,18 +1000,22 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 		goto out;
 	}

+	mutex_lock(&inode->i_mutex);
+
 	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
 				 SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
 	if (ret)
-		goto out;
+		goto out_mutex;

 	inode_dio_wait(inode);

+	down_write(&si->extent_sem);
+
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 	    (offset + len > i_size_read(inode))) {
 		ret = inode_newsize_ok(inode, offset + len);
 		if (ret)
-			goto out;
+			goto out_extent;
 	}

 	iblock = offset >> SCOUTFS_BLOCK_SM_SHIFT;

@@ -1024,7 +1025,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 		ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
 		if (ret)
-			goto out;
+			goto out_extent;

 		ret = fallocate_extents(sb, inode, iblock, last, lock);

@@ -1050,17 +1051,19 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 		}

 		if (ret <= 0)
-			goto out;
+			goto out_extent;

 		iblock += ret;
 		ret = 0;
 	}

-out:
-	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
+out_extent:
 	up_write(&si->extent_sem);
+out_mutex:
+	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
 	mutex_unlock(&inode->i_mutex);
+
+out:
 	trace_scoutfs_data_fallocate(sb, ino, mode, offset, len, ret);
 	return ret;
 }
@@ -720,7 +720,7 @@ static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
 			   struct list_head *ind_locks)
 {
 	struct super_block *sb = dir->i_sb;
-	struct inode *inode;
+	struct inode *inode = NULL;
 	u64 ind_seq;
 	int ret = 0;
 	u64 ino;

@@ -765,11 +765,9 @@ retry:
 	if (ret)
 		goto out_unlock;

-	inode = scoutfs_new_inode(sb, dir, mode, rdev, ino, *inode_lock);
-	if (IS_ERR(inode)) {
-		ret = PTR_ERR(inode);
+	ret = scoutfs_new_inode(sb, dir, mode, rdev, ino, *inode_lock, &inode);
+	if (ret < 0)
 		goto out;
-	}

 	ret = scoutfs_dirty_inode_item(dir, *dir_lock);
 out:

@@ -787,6 +785,8 @@ out_unlock:
 		*orph_lock = NULL;
 	}

+		if (!IS_ERR_OR_NULL(inode))
+			iput(inode);
 		inode = ERR_PTR(ret);
 	}
@@ -1319,11 +1319,11 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
 	insert_inode_hash(inode);
 	/* XXX need to set i_op/fop before here for sec callbacks */
 	d_instantiate(dentry, inode);
+	inode = NULL;
 	ret = 0;
 out:
 	if (ret < 0) {
 		/* XXX remove inode items */
-		if (!IS_ERR_OR_NULL(inode))
-			iput(inode);
-
 		symlink_item_ops(sb, SYM_DELETE, scoutfs_ino(inode), inode_lock,
 				 NULL, name_len);

@@ -1334,6 +1334,9 @@ out:
 	scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
 	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);

+	if (!IS_ERR_OR_NULL(inode))
+		iput(inode);
+
 	return ret;
 }
@@ -1923,10 +1926,8 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
 	si = SCOUTFS_I(inode);

 	ret = scoutfs_inode_orphan_create(sb, scoutfs_ino(inode), orph_lock);
-	if (ret < 0) {
-		iput(inode);
+	if (ret < 0)
 		goto out; /* XXX returning error but items created */
-	}

 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	si->crtime = inode->i_mtime;

@@ -1939,7 +1940,6 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
 	scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
 	scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
 	scoutfs_inode_index_unlock(sb, &ind_locks);
-	iput(inode);

 out:
 	scoutfs_release_trans(sb);

@@ -1948,6 +1948,9 @@ out:
 	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
 	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);

+	if (!IS_ERR_OR_NULL(inode))
+		iput(inode);
+
 	return ret;
 }
@@ -395,12 +395,13 @@ int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies)
 int scoutfs_fence_setup(struct super_block *sb)
 {
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
-	struct mount_options *opts = &sbi->opts;
+	struct scoutfs_mount_options opts;
 	struct fence_info *fi;
 	int ret;

 	/* can only fence if we can be elected by quorum */
-	if (opts->quorum_slot_nr == -1) {
+	scoutfs_options_read(sb, &opts);
+	if (opts.quorum_slot_nr == -1) {
 		ret = 0;
 		goto out;
 	}
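Both this hunk and the client.c hunk above make the same move: a long-lived pointer into sb_info is replaced by a stack snapshot filled by scoutfs_options_read(), so an options update can't change values out from under a decision already in flight. A small caller sketch assuming only the calls shown in these hunks; the helper name is hypothetical:

```c
/*
 * Sketch: read a consistent snapshot of the mount options and decide
 * from the copy, never from shared state that a remount could rewrite.
 */
static bool mount_is_quorum_member(struct super_block *sb)
{
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	/* a negative slot_nr means this mount wasn't given a quorum slot */
	return opts.quorum_slot_nr >= 0;
}
```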
kmod/src/inode.c (494 lines changed)
@@ -66,10 +66,6 @@ struct inode_sb_info {

 	struct delayed_work orphan_scan_dwork;

-	/* serialize multiple inode ->evict trying to delete same ino's items */
-	spinlock_t deleting_items_lock;
-	struct list_head deleting_items_list;
-
 	struct work_struct iput_work;
 	struct llist_head iput_llist;
 };
@@ -662,22 +658,12 @@ void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off)
 	} while (read_seqcount_retry(&si->seqcount, seq));
 }

-/*
- * We have inversions between getting cluster locks while performing
- * final deletion on a freeing inode and waiting on a freeing inode
- * while holding a cluster lock.
- *
- * We can avoid these deadlocks by hiding freeing inodes in our hash
- * lookup function.  We're fine with either returning null or populating
- * a new inode overlapping with eviction freeing a previous instance of
- * the inode.
- */
 static int scoutfs_iget_test(struct inode *inode, void *arg)
 {
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	u64 *ino = arg;

-	return (si->ino == *ino) && !(inode->i_state & I_FREEING);
+	return si->ino == *ino;
 }

 static int scoutfs_iget_set(struct inode *inode, void *arg)

@@ -691,11 +677,35 @@ static int scoutfs_iget_set(struct inode *inode, void *arg)
 	return 0;
 }

-struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino)
+/*
+ * There's a risk of a deadlock between lock invalidation and eviction.
+ * Invalidation blocks locks while looking up inodes.  Eviction blocks
+ * inode lookups while trying to get a lock.
+ *
+ * We have an inode lookup variant which will never block waiting for an
+ * inode.  This is more aggressive than base ilookup5_nowait() which
+ * will, you know, wait for inodes that are being freed.  We have our
+ * test function hide those inodes from find_inode so that it won't wait
+ * on them.
+ *
+ * These semantics are sufficiently weird that we use a big giant scary
+ * looking function name to deter use.
+ */
+static int ilookup_test_nonewfree(struct inode *inode, void *arg)
 {
-	return ilookup5(sb, ino, scoutfs_iget_test, &ino);
+	return scoutfs_iget_test(inode, arg) &&
+	       !(inode->i_state & (I_NEW | I_WILL_FREE | I_FREEING));
+}
+
+struct inode *scoutfs_ilookup_nowait_nonewfree(struct super_block *sb, u64 ino)
+{
+	return ilookup5_nowait(sb, ino, ilookup_test_nonewfree, &ino);
+}

+/*
+ * Final iput can delete an unused inode's items which can take multiple
+ * locked transactions.  iget (which can call iput in error cases) and
+ * iput must not be called with locks or transactions held.
+ */
 struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf)
 {
 	struct scoutfs_lock *lock = NULL;
@@ -703,32 +713,36 @@ struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf)
 	struct inode *inode = NULL;
 	int ret;

-	ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_READ, lkf, ino, &lock);
-	if (ret < 0)
-		goto out;
-
+	/* wait for vfs inode (I_FREEING in particular) before acquiring cluster lock */
 	inode = iget5_locked(sb, ino, scoutfs_iget_test, scoutfs_iget_set, &ino);
 	if (!inode) {
 		ret = -ENOMEM;
 		goto out;
 	}

+	ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_READ, lkf, ino, &lock);
+	if (ret < 0)
+		goto out;
+
 	if (inode->i_state & I_NEW) {
 		/* XXX ensure refresh, instead clear in drop_inode? */
 		si = SCOUTFS_I(inode);
 		atomic64_set(&si->last_refreshed, 0);
 		inode->i_version = 0;
 	}

 	ret = scoutfs_inode_refresh(inode, lock);
 	if (ret < 0)
 		goto out;

+	/* check nlink both for new and after refreshing */
 	if ((igf & SCOUTFS_IGF_LINKED) && inode->i_nlink == 0) {
 		ret = -ENOENT;
 		goto out;
 	}

-	ret = scoutfs_omap_inc(sb, ino);
+	if (inode->i_state & I_NEW) {
+		ret = scoutfs_omap_set(sb, ino);
 		if (ret < 0)
 			goto out;

@@ -741,8 +755,12 @@ out:
 	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);

 	if (ret < 0) {
-		if (inode)
-			iget_failed(inode);
+		if (inode) {
+			if (inode->i_state & I_NEW)
+				iget_failed(inode);
+			else
+				iput(inode);
+		}
 		inode = ERR_PTR(ret);
 	}
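A hypothetical caller sketch for the new scoutfs_ilookup_nowait_nonewfree() variant introduced above, matching how the lock.c and ioctl.c hunks further below use it: a NULL return now covers both a plain cache miss and an inode that is I_NEW or on its way to being freed, and callers treat those cases the same way instead of blocking on them.

```c
/*
 * Sketch: drop per-inode state during lock invalidation without ever
 * waiting on the inode cache.  "Not usably cached" and "not cached"
 * are handled identically; eviction of a freeing inode does its own
 * locking and deletion checks.
 */
static void drop_cached_inode_state(struct super_block *sb, u64 ino)
{
	struct inode *inode;

	inode = scoutfs_ilookup_nowait_nonewfree(sb, ino);
	if (!inode)
		return;

	/* ... invalidate per-inode state here ... */

	iput(inode);
}
```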
@@ -1393,10 +1411,14 @@ out:
 /*
  * Allocate and initialize a new inode.  The caller is responsible for
  * creating links to it and updating it.  @dir can be null.
+ *
+ * This is called with locks and a transaction because it creates the
+ * inode item.  We can't call iput on the new inode on error.  We
+ * return the inode to the caller *including on error* for them to put
+ * once they've released the transaction.
  */
-struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
-				umode_t mode, dev_t rdev, u64 ino,
-				struct scoutfs_lock *lock)
+int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
+		      u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret)
 {
 	struct scoutfs_inode_info *si;
 	struct scoutfs_key key;

@@ -1405,8 +1427,10 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
 	int ret;

 	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
+	if (!inode) {
+		ret = -ENOMEM;
+		goto out;
+	}

 	si = SCOUTFS_I(inode);
 	si->ino = ino;

@@ -1434,20 +1458,17 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
 	store_inode(&sinode, inode);
 	scoutfs_inode_init_key(&key, scoutfs_ino(inode));

-	ret = scoutfs_omap_inc(sb, ino);
+	ret = scoutfs_omap_set(sb, ino);
 	if (ret < 0)
 		goto out;

 	ret = scoutfs_item_create(sb, &key, &sinode, sizeof(sinode), lock);
 	if (ret < 0)
-		scoutfs_omap_dec(sb, ino);
+		scoutfs_omap_clear(sb, ino);
 out:
-	if (ret) {
-		iput(inode);
-		inode = ERR_PTR(ret);
-	}
+	*inode_ret = inode;

-	return inode;
+	return ret;
 }
 static void init_orphan_key(struct scoutfs_key *key, u64 ino)

@@ -1482,44 +1503,6 @@ int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_
 	return scoutfs_item_delete_force(sb, &key, lock);
 }

-struct deleting_ino_entry {
-	struct list_head head;
-	u64 ino;
-};
-
-static bool added_deleting_ino(struct inode_sb_info *inf, struct deleting_ino_entry *del, u64 ino)
-{
-	struct deleting_ino_entry *tmp;
-	bool added = true;
-
-	spin_lock(&inf->deleting_items_lock);
-
-	list_for_each_entry(tmp, &inf->deleting_items_list, head) {
-		if (tmp->ino == ino) {
-			added = false;
-			break;
-		}
-	}
-
-	if (added) {
-		del->ino = ino;
-		list_add_tail(&del->head, &inf->deleting_items_list);
-	}
-
-	spin_unlock(&inf->deleting_items_lock);
-
-	return added;
-}
-
-static void del_deleting_ino(struct inode_sb_info *inf, struct deleting_ino_entry *del)
-{
-	if (del->ino) {
-		spin_lock(&inf->deleting_items_lock);
-		list_del_init(&del->head);
-		spin_unlock(&inf->deleting_items_lock);
-	}
-}
 /*
  * Remove all the items associated with a given inode.  This is only
  * called once nlink has dropped to zero and nothing has the inode open

@@ -1528,22 +1511,10 @@ static void del_deleting_ino(struct inode_sb_info *inf, struct deleting_ino_entr
  * orphan item will continue triggering attempts to finish previous
  * partial deletion until all deletion is complete and the orphan item
  * is removed.
- *
- * Currently this can be called multiple times for multiple cached
- * inodes for a given ino number (ilookup avoids freeing inodes to avoid
- * cluster lock<->inode flag waiting inversions).  Some items are not
- * safe to delete concurrently, for example concurrent data truncation
- * could free extents multiple times.  We use a very silly list of inos
- * being deleted.  Duplicates just return success.  If the first
- * deletion ends up failing orphan deletion will come back around later
- * and retry.
  */
-static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
-			      struct scoutfs_lock *orph_lock)
+static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_inode *sinode,
+			      struct scoutfs_lock *lock, struct scoutfs_lock *orph_lock)
 {
-	DECLARE_INODE_SB_INFO(sb, inf);
-	struct deleting_ino_entry del = {{NULL, }};
-	struct scoutfs_inode sinode;
 	struct scoutfs_key key;
 	LIST_HEAD(ind_locks);
 	bool release = false;

@@ -1552,30 +1523,10 @@ static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lo
 	u64 size;
 	int ret;

-	if (!added_deleting_ino(inf, &del, ino)) {
-		ret = 0;
-		goto out;
-	}
-
 	scoutfs_inode_init_key(&key, ino);

-	ret = scoutfs_item_lookup_exact(sb, &key, &sinode, sizeof(sinode),
-					lock);
-	if (ret < 0) {
-		if (ret == -ENOENT)
-			ret = 0;
-		goto out;
-	}
-
-	/* XXX corruption, inode probably won't be freed without repair */
-	if (le32_to_cpu(sinode.nlink)) {
-		scoutfs_warn(sb, "Dangling orphan item for inode %llu.", ino);
-		ret = -EIO;
-		goto out;
-	}
-
-	mode = le32_to_cpu(sinode.mode);
-	size = le64_to_cpu(sinode.size);
+	mode = le32_to_cpu(sinode->mode);
+	size = le64_to_cpu(sinode->size);
 	trace_scoutfs_delete_inode(sb, ino, mode, size);

 	/* remove data items in their own transactions */

@@ -1593,7 +1544,7 @@ static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lo
 	/* then delete the small known number of remaining inode items */
 retry:
 	ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
-	      prepare_index_deletion(sb, &ind_locks, ino, mode, &sinode) ?:
+	      prepare_index_deletion(sb, &ind_locks, ino, mode, sinode) ?:
 	      scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq, false);
 	if (ret > 0)
 		goto retry;

@@ -1602,7 +1553,7 @@ retry:

 	release = true;

-	ret = remove_index_items(sb, ino, &sinode, &ind_locks);
+	ret = remove_index_items(sb, ino, sinode, &ind_locks);
 	if (ret)
 		goto out;

@@ -1612,15 +1563,21 @@ retry:
 		goto out;
 	}

-	ret = scoutfs_item_delete(sb, &key, lock);
-	if (ret)
+	/* make sure inode item and orphan are deleted together */
+	ret = scoutfs_item_dirty(sb, &key, lock);
+	if (ret < 0)
 		goto out;

 	ret = scoutfs_inode_orphan_delete(sb, ino, orph_lock);
-	if (ret == 0)
-		scoutfs_forest_dec_inode_count(sb);
+	if (ret < 0)
+		goto out;
+
+	ret = scoutfs_item_delete(sb, &key, lock);
+	BUG_ON(ret != 0); /* dirtying should have guaranteed success */
+
+	scoutfs_forest_dec_inode_count(sb);

 out:
-	del_deleting_ino(inf, &del);
 	if (release)
 		scoutfs_release_trans(sb);
 	scoutfs_inode_index_unlock(sb, &ind_locks);
@@ -1628,48 +1585,192 @@ out:
 	return ret;
 }

+struct inode_deletion_lock_data {
+	wait_queue_head_t waitq;
+	atomic64_t seq;
+	struct scoutfs_open_ino_map map;
+	unsigned long trying[DIV_ROUND_UP(SCOUTFS_OPEN_INO_MAP_BITS, BITS_PER_LONG)];
+};
+
 /*
- * iput_final has already written out the dirty pages to the inode
- * before we get here.  We're left with a clean inode that we have to
- * tear down.  We use locking and open inode number bitmaps to decide if
- * we should finally destroy an inode that is no longer open nor
- * reachable through directory entries.
+ * Get a lock data struct that has the current omap from this hold of
+ * the lock.  The lock data is saved on the lock so it can be used
+ * multiple times until the lock is refreshed.  Only one task will send
+ * an omap request at a time, and errors are only returned by each task
+ * as it gets a response to its send.
  */
+static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lock,
+				 struct inode_deletion_lock_data **ldata_ret, u64 group_nr)
+{
+	struct inode_deletion_lock_data *ldata;
+	u64 seq;
+	int ret;
+
+	/* we're storing omap maps in locks, they need to cover the same number of inodes */
+	BUILD_BUG_ON(SCOUTFS_OPEN_INO_MAP_BITS != SCOUTFS_LOCK_INODE_GROUP_NR);
+
+	/* allocate a new lock data struct as needed */
+	while ((ldata = cmpxchg(&lock->inode_deletion_data, NULL, NULL)) == NULL) {
+		ldata = kzalloc(sizeof(struct inode_deletion_lock_data), GFP_NOFS);
+		if (!ldata) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		atomic64_set(&ldata->seq, lock->write_seq - 1); /* ensure refresh */
+		init_waitqueue_head(&ldata->waitq);
+
+		/* the lock kfrees the inode_deletion_data pointer along with the lock */
+		if (cmpxchg(&lock->inode_deletion_data, NULL, ldata) == NULL)
+			break;
+		else
+			kfree(ldata);
+	}
+
+	/* make sure that the lock's data is current */
+	while ((seq = atomic64_read(&ldata->seq)) != lock->write_seq) {
+		if (seq != U64_MAX && atomic64_cmpxchg(&ldata->seq, seq, U64_MAX) == seq) {
+			/* ask the server for current omap */
+			ret = scoutfs_client_open_ino_map(sb, group_nr, &ldata->map);
+			if (ret == 0)
+				atomic64_set(&ldata->seq, lock->write_seq);
+			else
+				atomic64_set(&ldata->seq, lock->write_seq - 1);
+			wake_up(&ldata->waitq);
+			if (ret < 0)
+				goto out;
+		} else {
+			/* wait for someone else who's sent a request */
+			wait_event(ldata->waitq, atomic64_read(&ldata->seq) != U64_MAX);
+		}
+	}
+
+	ret = 0;
+out:
+	if (ret < 0)
+		ldata = NULL;
+	*ldata_ret = ldata;
+	return ret;
+}
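The allocation loop in get_current_lock_data() above is a lock-free publish-once pattern: racing tasks each allocate a candidate, exactly one cmpxchg() into the NULL pointer wins, and the losers free their copy and use the winner's. A minimal user-space sketch of the same pattern using C11 atomics; the struct is a stand-in:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct data { int x; };	/* stands in for inode_deletion_lock_data */

/*
 * Return the published data, allocating and publishing it if the slot
 * is still NULL.  On a lost race the loser frees its candidate and the
 * loop exits with the winner's pointer.
 */
static struct data *get_or_publish(_Atomic(struct data *) *slot)
{
	struct data *cur = atomic_load(slot);
	struct data *mine;

	while (cur == NULL) {
		mine = calloc(1, sizeof(*mine));
		if (!mine)
			return NULL;
		/* on failure, cur is updated to the winner's pointer */
		if (atomic_compare_exchange_strong(slot, &cur, mine))
			return mine;
		free(mine);
	}
	return cur;
}

int main(void)
{
	_Atomic(struct data *) slot = NULL;
	struct data *d = get_or_publish(&slot);

	printf("published %p\n", (void *)d);
	free(d);
	return 0;
}
```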
 /*
  * Try to delete all the items for an unused inode number.  This is the
  * relatively slow path that uses cluster locks, network requests, and
  * IO to ensure correctness.  Callers should try hard to avoid calling
  * when there's no work to do.
  *
- * Because lookup ignores freeing inodes we can get here from multiple
- * instances of an inode that is being deleted.  Orphan scanning in
- * particular can race with deletion.  delete_inode_items() resolves
- * concurrent attempts.
+ * Inode references are added under cluster locks.  In-memory vfs cache
+ * references are added under read cluster locks and are visible in omap
+ * bitmaps.  Directory entry references are added under write cluster
+ * locks and are visible in the inode's nlink.  Orphan items exist
+ * whenever nlink == 0 and are maintained under write cluster locks.
+ * Directory entries can be added to an inode with nlink == 0 to
+ * instantiate tmpfile inodes into the name space.  Cached inodes will
+ * not be created for inodes with an nlink of 0.
+ *
+ * Combining all this we know that it's safe to delete an inode's items
+ * when we hold an exclusive write cluster lock, the inode has nlink ==
+ * 0, and an omap request protected by the lock doesn't have the inode's
+ * bit set.
+ *
+ * This is called by orphan scanning and vfs inode cache eviction after
+ * they've checked that the inode could really be deleted.  We serialize
+ * on a bit in the lock data so that we only have one deletion attempt
+ * per inode under this mount's cluster lock.
  */
+static int try_delete_inode_items(struct super_block *sb, u64 ino)
+{
+	struct inode_deletion_lock_data *ldata = NULL;
+	struct scoutfs_lock *orph_lock = NULL;
+	struct scoutfs_lock *lock = NULL;
+	struct scoutfs_inode sinode;
+	struct scoutfs_key key;
+	u64 group_nr;
+	int bit_nr;
+	int ret;
+
+	ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &lock);
+	if (ret < 0)
+		goto out;
+
+	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
+
+	ret = get_current_lock_data(sb, lock, &ldata, group_nr);
+	if (ret < 0)
+		goto out;
+
+	/* only one local attempt per inode at a time */
+	if (test_and_set_bit(bit_nr, ldata->trying)) {
+		ret = 0;
+		goto out;
+	}
+
+	/* can't delete if it's cached in local or remote mounts */
+	if (scoutfs_omap_test(sb, ino) || test_bit_le(bit_nr, ldata->map.bits)) {
+		ret = 0;
+		goto out;
+	}
+
+	scoutfs_inode_init_key(&key, ino);
+	ret = scoutfs_item_lookup_exact(sb, &key, &sinode, sizeof(sinode), lock);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			ret = 0;
+		goto out;
+	}
+
+	if (le32_to_cpu(sinode.nlink) > 0) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, &orph_lock);
+	if (ret < 0)
+		goto out;
+
+	ret = delete_inode_items(sb, ino, &sinode, lock, orph_lock);
+out:
+	if (ldata)
+		clear_bit(bit_nr, ldata->trying);
+
+	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
+	scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);
+
+	return ret;
+}
+/*
+ * As we drop an inode we need to decide to try and delete its items or
+ * not, which is expensive.  The two common cases we want to get right
+ * both have cluster lock coverage and don't want to delete.  Dropping
+ * unused inodes during read lock invalidation has the current lock and
+ * sees a nonzero nlink and knows not to delete.  Final iput after a
+ * local unlink also has a lock, sees a zero nlink, and tries to perform
+ * item deletion in the task that dropped the last link, as users
+ * expect.
+ *
+ * Evicting an inode outside of cluster locking is the odd slow path
+ * that involves lock contention during the worst cross-mount
+ * open-unlink/delete case.
+ */
 void scoutfs_evict_inode(struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
+	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	const u64 ino = scoutfs_ino(inode);
-	struct scoutfs_lock *orph_lock;
-	struct scoutfs_lock *lock;
-	int ret;

-	trace_scoutfs_evict_inode(inode->i_sb, scoutfs_ino(inode),
-				  inode->i_nlink, is_bad_inode(inode));
+	trace_scoutfs_evict_inode(sb, ino, inode->i_nlink, is_bad_inode(inode));

-	if (is_bad_inode(inode))
-		goto clear;
+	if (!is_bad_inode(inode)) {
+		truncate_inode_pages_final(&inode->i_data);

-	truncate_inode_pages_final(&inode->i_data);
+		/* clear before trying to delete tests */
+		scoutfs_omap_clear(sb, ino);

-	ret = scoutfs_omap_should_delete(sb, inode, &lock, &orph_lock);
-	if (ret > 0) {
-		ret = delete_inode_items(inode->i_sb, scoutfs_ino(inode), lock, orph_lock);
-		scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
-		scoutfs_unlock(sb, orph_lock, SCOUTFS_LOCK_WRITE_ONLY);
-	}
-	if (ret < 0) {
-		scoutfs_err(sb, "error %d while checking to delete inode nr %llu, it might linger.",
-			    ret, ino);
-	}
+		if (!scoutfs_lock_is_covered(sb, &si->ino_lock_cov) || inode->i_nlink == 0)
+			try_delete_inode_items(sb, scoutfs_ino(inode));
+	}

-	scoutfs_omap_dec(sb, ino);
-
-clear:
 	clear_inode(inode);
 }
@@ -1745,18 +1846,26 @@ void scoutfs_inode_queue_iput(struct inode *inode)
 /*
  * All mounts are performing this work concurrently.  We introduce
  * significant jitter between them to try and keep them from all
- * bunching up and working on the same inodes.
+ * bunching up and working on the same inodes.  We always try to delay
+ * for at least one jiffy if precision tricks us into calculating no
+ * delay.
  */
-static void schedule_orphan_dwork(struct inode_sb_info *inf)
+void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb)
 {
-#define ORPHAN_SCAN_MIN_MS (10 * MSEC_PER_SEC)
-#define ORPHAN_SCAN_JITTER_MS (40 * MSEC_PER_SEC)
+	DECLARE_INODE_SB_INFO(sb, inf);
+	struct scoutfs_mount_options opts;
+	unsigned long low;
+	unsigned long high;
+	unsigned long delay;

 	if (!inf->stopped) {
-		delay = msecs_to_jiffies(ORPHAN_SCAN_MIN_MS +
-					 prandom_u32_max(ORPHAN_SCAN_JITTER_MS));
-		schedule_delayed_work(&inf->orphan_scan_dwork, delay);
+		scoutfs_options_read(sb, &opts);
+
+		low = (opts.orphan_scan_delay_ms * 80) / 100;
+		high = (opts.orphan_scan_delay_ms * 120) / 100;
+		delay = msecs_to_jiffies(low + prandom_u32_max(high - low)) ?: 1;
+
+		mod_delayed_work(system_wq, &inf->orphan_scan_dwork, delay);
 	}
 }
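With the hunk above, the scan delay is derived from the orphan_scan_delay_ms option instead of fixed constants: each pass lands uniformly in [80%, 120%] of the configured delay, and the `?: 1` guarantees at least one jiffy so rounding can never produce an immediate self-rescheduling loop. A user-space sketch of the arithmetic; the configured value is an assumption for the demo:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical configured value; the option name comes from the diff */
	unsigned long orphan_scan_delay_ms = 60000;
	unsigned long low = orphan_scan_delay_ms * 80 / 100;	/* 48000 */
	unsigned long high = orphan_scan_delay_ms * 120 / 100;	/* 72000 */
	unsigned long delay;

	srandom(42);	/* deterministic for the demo */
	delay = low + (unsigned long)(random() % (long)(high - low));
	if (delay == 0)
		delay = 1;	/* mirrors the "?: 1" floor after jiffies rounding */

	printf("next orphan scan in %lu ms\n", delay);
	return 0;
}
```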
@@ -1764,11 +1873,10 @@ static void schedule_orphan_dwork(struct inode_sb_info *inf)
 /*
  * Find and delete inodes whose only remaining reference is the
  * persistent orphan item that was created as they were unlinked.
  *
- * Orphan items are created as the final directory entry referring to an
- * inode is deleted.  They're deleted as the final cached inode is
- * evicted and the inode items are destroyed.  They can linger if all
- * the cached inodes pinning the inode fail to delete as they are
- * evicted from the cache -- either through crashing or errors.
+ * Orphan items are maintained for inodes that have an nlink of 0.
+ * Typically this is from unlink, but tmpfiles are created with orphans.
+ * They're deleted as the final cached inode is evicted and the inode
+ * items are destroyed.
  *
  * This work runs in all mounts in the background looking for those
  * orphaned inodes that weren't fully deleted.

@@ -1777,20 +1885,16 @@ static void schedule_orphan_dwork(struct inode_sb_info *inf)
  * only find orphan items that made it to the fs root after being merged
  * from a mount's log btree.  This naturally avoids orphan items that
  * exist while inodes have been unlinked but are still cached, including
- * O_TMPFILE inodes that are actively used during normal operations.
+ * tmpfile inodes that are actively used during normal operations.
  * Scanning the read-only persistent fs root uses cached blocks and
  * avoids the lock contention we'd cause if we tried to use the
  * consistent item cache.  The downside is that it adds a bit of
- * latency.  If an orphan was created in error it'll take until the
- * mount's log btree is finalized and merged.  A crash will have the log
- * btree merged after it is fenced.
+ * latency.
  *
- * Once we find candidate orphan items we can first check our local
- * inode cache for inodes that are already on their way to eviction and
- * can be skipped.  Then we ask the server for the open map containing
- * the inode.  Only if we don't have it cached, and no one else does, do
- * we try and read it into our cache and evict it to trigger the final
- * inode deletion process.
+ * Once we find candidate orphan items we first check our local omap for
+ * a locally cached inode.  Then we ask the server for the open map
+ * containing the inode.  Only if we don't see any cached users do we do
+ * the expensive work of acquiring locks to try and delete the items.
  */
 static void inode_orphan_scan_worker(struct work_struct *work)
 {

@@ -1802,7 +1906,6 @@ static void inode_orphan_scan_worker(struct work_struct *work)
 	SCOUTFS_BTREE_ITEM_REF(iref);
 	struct scoutfs_key last;
 	struct scoutfs_key key;
-	struct inode *inode;
 	u64 group_nr;
 	int bit_nr;
 	u64 ino;

@@ -1841,17 +1944,14 @@ static void inode_orphan_scan_worker(struct work_struct *work)
 		scoutfs_inc_counter(sb, orphan_scan_item);
 		ino = le64_to_cpu(key.sko_ino);

-		/* locally cached inodes will already be deleted */
-		inode = scoutfs_ilookup(sb, ino);
-		if (inode) {
+		/* locally cached inodes will try to delete as they evict */
+		if (scoutfs_omap_test(sb, ino)) {
 			scoutfs_inc_counter(sb, orphan_scan_cached);
-			iput(inode);
 			continue;
 		}

 		/* get an omap that covers the orphaned ino */
-		group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
-		bit_nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;
+		scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);

 		if (le64_to_cpu(omap.args.group_nr) != group_nr) {
 			ret = scoutfs_client_open_ino_map(sb, group_nr, &omap);

@@ -1859,25 +1959,15 @@ static void inode_orphan_scan_worker(struct work_struct *work)
 				goto out;
 		}

-		/* don't need to evict if someone else has it open (cached) */
+		/* remote cached inodes will also try to delete */
 		if (test_bit_le(bit_nr, omap.bits)) {
+			scoutfs_inc_counter(sb, orphan_scan_omap_set);
 			continue;
 		}

-		/* try to cache and evict unused inode to delete, can be racing */
-		inode = scoutfs_iget(sb, ino, 0, 0);
-		if (IS_ERR(inode)) {
-			ret = PTR_ERR(inode);
-			if (ret == -ENOENT)
-				continue;
-			else
-				goto out;
-		}
-
-		scoutfs_inc_counter(sb, orphan_scan_read);
-		SCOUTFS_I(inode)->drop_invalidated = true;
-		iput(inode);
+		/* seemingly orphaned and unused, get locks and check for sure */
+		scoutfs_inc_counter(sb, orphan_scan_attempts);
+		ret = try_delete_inode_items(sb, ino);
 	}

 	ret = 0;

@@ -1886,7 +1976,7 @@ out:
 	if (ret < 0)
 		scoutfs_inc_counter(sb, orphan_scan_error);

-	schedule_orphan_dwork(inf);
+	scoutfs_inode_schedule_orphan_dwork(sb);
 }

 /*
@@ -1994,8 +2084,6 @@ int scoutfs_inode_setup(struct super_block *sb)
 	spin_lock_init(&inf->dir_ino_alloc.lock);
 	spin_lock_init(&inf->ino_alloc.lock);
 	INIT_DELAYED_WORK(&inf->orphan_scan_dwork, inode_orphan_scan_worker);
-	spin_lock_init(&inf->deleting_items_lock);
-	INIT_LIST_HEAD(&inf->deleting_items_list);
 	INIT_WORK(&inf->iput_work, iput_worker);
 	init_llist_head(&inf->iput_llist);

@@ -2011,9 +2099,7 @@ int scoutfs_inode_setup(struct super_block *sb)
  */
 void scoutfs_inode_start(struct super_block *sb)
 {
-	DECLARE_INODE_SB_INFO(sb, inf);
-
-	schedule_orphan_dwork(inf);
+	scoutfs_inode_schedule_orphan_dwork(sb);
 }

 /*
@@ -82,7 +82,9 @@ void scoutfs_inode_queue_iput(struct inode *inode);

 #define SCOUTFS_IGF_LINKED (1 << 0) /* enoent if nlink == 0 */
 struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf);
-struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);
+struct inode *scoutfs_ilookup_nowait(struct super_block *sb, u64 ino);
+struct inode *scoutfs_ilookup_nowait_nonewfree(struct super_block *sb, u64 ino);

 void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino);
 void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,

@@ -104,9 +106,8 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
 			       struct list_head *ind_locks);

 int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret);
-struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
-				umode_t mode, dev_t rdev, u64 ino,
-				struct scoutfs_lock *lock);
+int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
+		      u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret);

 void scoutfs_inode_set_meta_seq(struct inode *inode);
 void scoutfs_inode_set_data_seq(struct inode *inode);

@@ -126,6 +127,7 @@ int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);

 int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);
 int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);
+void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb);

 void scoutfs_inode_queue_writeback(struct inode *inode);
 int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);
@@ -387,7 +387,7 @@ static long scoutfs_ioc_data_wait_err(struct file *file, unsigned long arg)
 	if (sblock > eblock)
 		return -EINVAL;

-	inode = scoutfs_ilookup(sb, args.ino);
+	inode = scoutfs_ilookup_nowait_nonewfree(sb, args.ino);
 	if (!inode) {
 		ret = -ESTALE;
 		goto out;
@@ -142,7 +142,7 @@ static void invalidate_inode(struct super_block *sb, u64 ino)
 	struct scoutfs_inode_info *si;
 	struct inode *inode;

-	inode = scoutfs_ilookup(sb, ino);
+	inode = scoutfs_ilookup_nowait_nonewfree(sb, ino);
 	if (inode) {
 		si = SCOUTFS_I(inode);
@@ -255,7 +255,7 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
 	BUG_ON(!list_empty(&lock->shrink_head));
 	BUG_ON(!list_empty(&lock->cov_list));

-	scoutfs_omap_free_lock_data(lock->omap_data);
+	kfree(lock->inode_deletion_data);
 	kfree(lock);
 }

@@ -291,7 +291,6 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
 	lock->mode = SCOUTFS_LOCK_NULL;

 	atomic64_set(&lock->forest_bloom_nr, 0);
-	spin_lock_init(&lock->omap_spinlock);

 	trace_scoutfs_lock_alloc(sb, lock);
@@ -11,7 +11,7 @@

 #define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID

-struct scoutfs_omap_lock;
+struct inode_deletion_lock_data;

 /*
  * A few fields (start, end, refresh_gen, write_seq, granted_mode)

@@ -47,9 +47,8 @@ struct scoutfs_lock {
 	/* the forest tracks which log tree last saw bloom bit updates */
 	atomic64_t forest_bloom_nr;

-	/* open ino mapping has a valid map for a held write lock */
-	spinlock_t omap_spinlock;
-	struct scoutfs_omap_lock_data *omap_data;
+	/* inode deletion tracks some state per lock */
+	struct inode_deletion_lock_data *inode_deletion_data;
 };

 struct scoutfs_lock_coverage {
@@ -749,7 +749,7 @@ out:
 	if (ret < 0) {
 		scoutfs_err(sb, "lock server err %d during client rid %016llx farewell, shutting down",
 			    ret, rid);
-		scoutfs_server_abort(sb);
+		scoutfs_server_stop(sb);
 	}

 	return ret;

@@ -1292,7 +1292,7 @@ restart:
 		if (ret) {
 			scoutfs_err(sb, "client fence returned err %d, shutting down server",
 				    ret);
-			scoutfs_server_abort(sb);
+			scoutfs_server_stop(sb);
 		}
 	}
 	destroy_conn(acc);
kmod/src/omap.c (296 lines changed)
@@ -30,27 +30,22 @@
 /*
  * As a client removes an inode from its cache with an nlink of 0 it
  * needs to decide if it is the last client using the inode and should
- * fully delete all its items.  It needs to know if other mounts still
- * have the inode in use.
+ * fully delete all the inode's items.  It needs to know if other mounts
+ * still have the inode in use.
  *
- * We need a way to communicate between mounts that an inode is open.
+ * We need a way to communicate between mounts that an inode is in use.
  * We don't want to pay the synchronous per-file locking round trip
  * costs associated with per-inode open locks that you'd typically see
- * in systems to solve this problem.
+ * in systems to solve this problem.  The first prototypes of this
+ * tracked open file handles so this was coined the open map, though it
+ * now tracks cached inodes.
  *
- * Instead clients maintain open bitmaps that cover groups of inodes.
- * As inodes enter the cache their bit is set, and as the inode is
- * evicted the bit is cleared.  As an inode is evicted messages are sent
- * around the cluster to get the current bitmaps for that inode's group
- * from all active mounts.  If the inode's bit is clear then it can be
- * deleted.
- *
- * We associate the open bitmaps with our cluster locking of inode
- * groups to cache these open bitmaps.  As long as we have the lock then
- * nlink can't be changed on any remote mounts.  Specifically, it can't
- * increase from 0 so no clear bits can gain references on remote
- * mounts.  As long as we have the lock, all clear bits in the group for
- * inodes with 0 nlink can be deleted.
+ * Clients maintain bitmaps that cover groups of inodes.  As inodes
+ * enter the cache their bit is set and as the inode is evicted the bit
+ * is cleared.  As deletion is attempted, either by scanning orphans or
+ * evicting an inode with an nlink of 0, messages are sent around the
+ * cluster to get the current bitmaps for that inode's group from all
+ * active mounts.  If the inode's bit is clear then it can be deleted.
  *
  * This layer maintains a list of client rids to send messages to.  The
  * server calls us as clients enter and leave the cluster.  We can't

@@ -85,14 +80,12 @@ struct omap_info {
 	struct omap_info *name = SCOUTFS_SB(sb)->omap_info

 /*
- * The presence of an inode in the inode cache increases the count of
- * its inode number's position within its lock group.  These structs
- * track the counts for all the inodes in a lock group and maintain a
- * bitmap whose bits are set for each non-zero count.
+ * The presence of an inode in the inode cache sets its bit in the lock
+ * group's bitmap.
  *
  * We don't want to add additional global synchronization of inode cache
  * maintenance so these are tracked in an rcu hash table.  Once their
- * total count reaches zero they're removed from the hash and queued for
+ * total reaches zero they're removed from the hash and queued for
 * freeing and readers should ignore them.
  */
 struct omap_group {
@@ -102,7 +95,6 @@ struct omap_group {
 	u64 nr;
 	spinlock_t lock;
 	unsigned int total;
-	unsigned int *counts;
 	__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
 };

@@ -111,8 +103,7 @@ struct omap_group {
 do { \
 	__typeof__(group) _grp = (group); \
 	__typeof__(bit_nr) _nr = (bit_nr); \
 \
-	trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr, \
-					 _nr < 0 ? -1 : _grp->counts[_nr]); \
+	trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr); \
 } while (0)
 /*

@@ -134,18 +125,6 @@ struct omap_request {
 	struct scoutfs_open_ino_map map;
 };

-/*
- * In each inode group cluster lock we store data to track the open ino
- * map which tracks all the inodes that the cluster lock covers.  When
- * the seq shows that the map is stale we send a request to update it.
- */
-struct scoutfs_omap_lock_data {
-	u64 seq;
-	bool req_in_flight;
-	wait_queue_head_t waitq;
-	struct scoutfs_open_ino_map map;
-};
-
 static inline void init_rid_list(struct omap_rid_list *list)
 {
 	INIT_LIST_HEAD(&list->head);

@@ -232,7 +211,7 @@ static void free_rids(struct omap_rid_list *list)
 	}
 }

-static void calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
+void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
 {
 	*group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
 	*bit_nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;

@@ -242,21 +221,13 @@ static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
 {
 	struct omap_group *group;

-	BUILD_BUG_ON((sizeof(group->counts[0]) * SCOUTFS_OPEN_INO_MAP_BITS) > PAGE_SIZE);
-
 	group = kzalloc(sizeof(struct omap_group), GFP_NOFS);
 	if (group) {
 		group->sb = sb;
 		group->nr = group_nr;
 		spin_lock_init(&group->lock);
-
-		group->counts = (void *)get_zeroed_page(GFP_NOFS);
-		if (!group->counts) {
-			kfree(group);
-			group = NULL;
-		} else {
-			trace_group(sb, alloc, group, -1);
-		}
+		trace_group(sb, alloc, group, -1);
 	}

 	return group;

@@ -265,7 +236,6 @@ static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
 static void free_group(struct super_block *sb, struct omap_group *group)
 {
 	trace_group(sb, free, group, -1);
-	free_page((unsigned long)group->counts);
 	kfree(group);
 }
@@ -283,13 +253,16 @@ static const struct rhashtable_params group_ht_params = {
 };

 /*
- * Track a cached inode in its group.  Our increment can be racing with
- * a final decrement that removes the group from the hash, sets total to
+ * Track a cached inode in its group.  Our set can be racing with a
+ * final clear that removes the group from the hash, sets total to
  * UINT_MAX, and calls rcu free.  We can retry until the dead group is
  * no longer visible in the hash table and we can insert a new allocated
  * group.
+ *
+ * The caller must ensure that the bit is clear, -EEXIST will be
+ * returned otherwise.
  */
-int scoutfs_omap_inc(struct super_block *sb, u64 ino)
+int scoutfs_omap_set(struct super_block *sb, u64 ino)
 {
 	DECLARE_OMAP_INFO(sb, ominf);
 	struct omap_group *group;

@@ -298,7 +271,7 @@ int scoutfs_omap_inc(struct super_block *sb, u64 ino)
 	bool found;
 	int ret = 0;

-	calc_group_nrs(ino, &group_nr, &bit_nr);
+	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);

 retry:
 	found = false;

@@ -308,10 +281,10 @@ retry:
 		spin_lock(&group->lock);
 		if (group->total < UINT_MAX) {
 			found = true;
-			if (group->counts[bit_nr]++ == 0) {
-				set_bit_le(bit_nr, group->bits);
+			if (WARN_ON_ONCE(test_and_set_bit_le(bit_nr, group->bits)))
+				ret = -EEXIST;
+			else
 				group->total++;
-			}
 		}
 		trace_group(sb, inc, group, bit_nr);
 		spin_unlock(&group->lock);
@@ -342,29 +315,50 @@ retry:
 	return ret;
 }

+bool scoutfs_omap_test(struct super_block *sb, u64 ino)
+{
+	DECLARE_OMAP_INFO(sb, ominf);
+	struct omap_group *group;
+	bool ret = false;
+	u64 group_nr;
+	int bit_nr;
+
+	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
+
+	rcu_read_lock();
+	group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
+	if (group) {
+		spin_lock(&group->lock);
+		ret = !!test_bit_le(bit_nr, group->bits);
+		spin_unlock(&group->lock);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 /*
- * Decrement a previously incremented ino count.  Not finding a count
- * implies imbalanced inc/dec or bugs freeing groups.  We only free
- * groups here as the last dec drops the group's total count to 0.
+ * Clear a previously set ino bit.  Trying to clear a bit that's already
+ * clear implies imbalanced set/clear or bugs freeing groups.  We only
+ * free groups here as the last clear drops the group's total to 0.
  */
-void scoutfs_omap_dec(struct super_block *sb, u64 ino)
+void scoutfs_omap_clear(struct super_block *sb, u64 ino)
 {
 	DECLARE_OMAP_INFO(sb, ominf);
 	struct omap_group *group;
 	u64 group_nr;
 	int bit_nr;

-	calc_group_nrs(ino, &group_nr, &bit_nr);
+	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);

 	rcu_read_lock();
 	group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
 	if (group) {
 		spin_lock(&group->lock);
-		WARN_ON_ONCE(group->counts[bit_nr] == 0);
+		WARN_ON_ONCE(!test_bit_le(bit_nr, group->bits));
 		WARN_ON_ONCE(group->total == 0);
 		WARN_ON_ONCE(group->total == UINT_MAX);
-		if (--group->counts[bit_nr] == 0) {
-			clear_bit_le(bit_nr, group->bits);
+		if (test_and_clear_bit_le(bit_nr, group->bits)) {
 			if (--group->total == 0) {
 				group->total = UINT_MAX;
 				rhashtable_remove_fast(&ominf->group_ht, &group->ht_head,
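These hunks replace the per-inode reference counts with single bits plus a per-group total of set bits: set now warns and fails if the bit is already set, and the clear that drops the total to zero retires the group. A user-space sketch of that bookkeeping; sizes and names are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

struct group {
	uint64_t bits;		/* one bit per inode in the group */
	unsigned int total;	/* number of set bits */
};

static int group_set(struct group *g, int nr)
{
	if (g->bits & (UINT64_C(1) << nr))
		return -1;	/* -EEXIST in the kernel version */
	g->bits |= UINT64_C(1) << nr;
	g->total++;
	return 0;
}

/* returns nonzero when the last clear should retire the group */
static int group_clear(struct group *g, int nr)
{
	g->bits &= ~(UINT64_C(1) << nr);
	return --g->total == 0;
}

int main(void)
{
	struct group g = { 0 };

	group_set(&g, 3);
	printf("retire after clear: %d\n", group_clear(&g, 3));
	return 0;
}
```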
@@ -664,8 +658,7 @@ int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,

 /*
  * The client is receiving a request from the server for its map for the
- * given group.  Look up the group and copy the bits to the map for
- * non-zero open counts.
+ * given group.  Look up the group and copy the bits to the map.
  *
  * The mount originating the request for this bitmap has the inode group
  * write locked.  We can't be adding links to any inodes in the group
@@ -814,179 +807,6 @@ void scoutfs_omap_server_shutdown(struct super_block *sb)
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
static bool omap_req_in_flight(struct scoutfs_lock *lock, struct scoutfs_omap_lock_data *ldata)
|
||||
{
|
||||
bool in_flight;
|
||||
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
in_flight = ldata->req_in_flight;
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
|
||||
return in_flight;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure the map covered by the cluster lock is current. The caller
|
||||
* holds the cluster lock so once we store lock_data on the cluster lock
|
||||
* it won't be freed and the write_seq in the cluster lock won't change.
|
||||
*
|
||||
* The omap_spinlock protects the omap_data in the cluster lock. We
|
||||
* have to drop it if we have to block to allocate lock_data, send a
|
||||
* request for a new map, or wait for a request in flight to finish.
|
||||
*/
|
||||
static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
struct scoutfs_omap_lock_data **ldata_ret, u64 group_nr)
|
||||
{
|
||||
struct scoutfs_omap_lock_data *ldata;
|
||||
bool send_req;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
ldata = lock->omap_data;
|
||||
if (ldata == NULL) {
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
ldata = kzalloc(sizeof(struct scoutfs_omap_lock_data), GFP_NOFS);
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
if (!ldata) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (lock->omap_data == NULL) {
|
||||
ldata->seq = lock->write_seq - 1; /* ensure refresh */
|
||||
init_waitqueue_head(&ldata->waitq);
|
||||
|
||||
lock->omap_data = ldata;
|
||||
} else {
|
||||
kfree(ldata);
|
||||
ldata = lock->omap_data;
|
||||
}
|
||||
}
|
||||
|
||||
while (ldata->seq != lock->write_seq) {
|
||||
/* only one waiter sends a request at a time */
|
||||
if (!ldata->req_in_flight) {
|
||||
ldata->req_in_flight = true;
|
||||
send_req = true;
|
||||
} else {
|
||||
send_req = false;
|
||||
}
|
||||
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
if (send_req)
|
||||
ret = scoutfs_client_open_ino_map(sb, group_nr, &ldata->map);
|
||||
else
|
||||
wait_event(ldata->waitq, !omap_req_in_flight(lock, ldata));
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
/* only sender can return error, other waiters retry */
|
||||
if (send_req) {
|
||||
ldata->req_in_flight = false;
|
||||
if (ret == 0)
|
||||
ldata->seq = lock->write_seq;
|
||||
wake_up(&ldata->waitq);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
|
||||
if (ret == 0)
|
||||
*ldata_ret = ldata;
|
||||
else
|
||||
*ldata_ret = NULL;
|
||||
|
||||
return ret;
|
||||
}
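/*
 * A compact pthread model of the single-flight refresh in
 * get_current_lock_data() above: the first waiter to see a stale
 * seq sends the request, everyone else sleeps and rechecks once the
 * flag clears. A sketch of the pattern only, not the kernel code.
 */
#include <pthread.h>
#include <stdbool.h>

struct cached_map {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    unsigned long seq;       /* seq the cached map reflects */
    unsigned long write_seq; /* current authoritative seq */
    bool req_in_flight;
};

static int refresh_map(struct cached_map *m, int (*fetch)(struct cached_map *))
{
    int ret = 0;

    pthread_mutex_lock(&m->lock);
    while (m->seq != m->write_seq) {
        if (!m->req_in_flight) {
            m->req_in_flight = true;
            pthread_mutex_unlock(&m->lock);
            ret = fetch(m); /* only one sender at a time */
            pthread_mutex_lock(&m->lock);
            m->req_in_flight = false;
            if (ret == 0)
                m->seq = m->write_seq;
            pthread_cond_broadcast(&m->cond);
            if (ret < 0)
                break; /* only the sender returns the error */
        } else {
            pthread_cond_wait(&m->cond, &m->lock);
        }
    }
    pthread_mutex_unlock(&m->lock);
    return ret;
}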

/*
* Return 1 and give the caller their locks when they should delete the
* inode items. It's safe to delete the inode items when it is no
* longer reachable and nothing is referencing it.
*
* The inode is unreachable when nlink hits zero. Cluster locks protect
* modification and testing of nlink. We use the ino_lock_cov coverage
* to short circuit the common case of having a locked inode that hasn't
* been deleted. If it isn't locked, we have to acquire the lock to
* refresh the inode to see its current nlink.
*
* Then we use an open inode bitmap that covers all the inodes in the
* lock group to determine if the inode is present in any other mount's
* caches. We refresh it by asking the server for all clients' maps and
* then store it in the lock. As long as we hold the lock nothing can
* increase nlink from zero and let people get a reference to the inode.
*/
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
struct scoutfs_lock *orph_lock = NULL;
struct scoutfs_lock *lock = NULL;
const u64 ino = scoutfs_ino(inode);
struct scoutfs_omap_lock_data *ldata;
u64 group_nr;
int bit_nr;
int ret;
int err;

/* lock group and omap constants are defined independently */
BUILD_BUG_ON(SCOUTFS_OPEN_INO_MAP_BITS != SCOUTFS_LOCK_INODE_GROUP_NR);

if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
ret = 0;
goto out;
}

ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
if (ret < 0)
goto out;

if (inode->i_nlink > 0) {
ret = 0;
goto out;
}

calc_group_nrs(ino, &group_nr, &bit_nr);

/* only one request to refresh the map at a time */
ret = get_current_lock_data(sb, lock, &ldata, group_nr);
if (ret < 0)
goto out;

/* can delete caller's zero nlink inode if it's not cached in other mounts */
ret = !test_bit_le(bit_nr, ldata->map.bits);
out:
trace_scoutfs_omap_should_delete(sb, ino, inode->i_nlink, ret);

if (ret > 0) {
err = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, &orph_lock);
if (err < 0)
ret = err;
}

if (ret <= 0) {
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
lock = NULL;
}

*lock_ret = lock;
*orph_lock_ret = orph_lock;
return ret;
}

void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata)
{
if (ldata) {
WARN_ON_ONCE(ldata->req_in_flight);
WARN_ON_ONCE(waitqueue_active(&ldata->waitq));
kfree(ldata);
}
}

int scoutfs_omap_setup(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

@@ -1,13 +1,12 @@
#ifndef _SCOUTFS_OMAP_H_
#define _SCOUTFS_OMAP_H_

int scoutfs_omap_inc(struct super_block *sb, u64 ino);
void scoutfs_omap_dec(struct super_block *sb, u64 ino);
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret);
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata);
int scoutfs_omap_set(struct super_block *sb, u64 ino);
bool scoutfs_omap_test(struct super_block *sb, u64 ino);
void scoutfs_omap_clear(struct super_block *sb, u64 ino);
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map_args *args);
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr);

int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);

@@ -26,22 +26,30 @@
#include "msg.h"
#include "options.h"
#include "super.h"
#include "inode.h"

enum {
Opt_metadev_path,
Opt_orphan_scan_delay_ms,
Opt_quorum_slot_nr,
Opt_err,
};

static const match_table_t tokens = {
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
{Opt_metadev_path, "metadev_path=%s"},
{Opt_orphan_scan_delay_ms, "orphan_scan_delay_ms=%s"},
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
{Opt_err, NULL}
};

struct options_sb_info {
struct dentry *debugfs_dir;
struct options_info {
seqlock_t seqlock;
struct scoutfs_mount_options opts;
struct scoutfs_sysfs_attrs sysfs_attrs;
};

u32 scoutfs_option_u32(struct super_block *sb, int token)
{
WARN_ON_ONCE(1);
return 0;
}
#define DECLARE_OPTIONS_INFO(sb, name) \
struct options_info *name = SCOUTFS_SB(sb)->options_info

static int parse_bdev_path(struct super_block *sb, substring_t *substr,
char **bdev_path_ret)
@@ -89,8 +97,29 @@ out:
return ret;
}

int scoutfs_parse_options(struct super_block *sb, char *options,
struct mount_options *parsed)
static void free_options(struct scoutfs_mount_options *opts)
{
kfree(opts->metadev_path);
}

#define MIN_ORPHAN_SCAN_DELAY_MS 100UL
#define DEFAULT_ORPHAN_SCAN_DELAY_MS (10 * MSEC_PER_SEC)
#define MAX_ORPHAN_SCAN_DELAY_MS (60 * MSEC_PER_SEC)

static void init_default_options(struct scoutfs_mount_options *opts)
{
memset(opts, 0, sizeof(*opts));
opts->quorum_slot_nr = -1;
opts->orphan_scan_delay_ms = DEFAULT_ORPHAN_SCAN_DELAY_MS;
}

/*
* Parse the option string into our options struct. This can allocate
* memory in the struct. The caller is responsible for always calling
* free_options() when the struct is destroyed, including when we return
* an error.
*/
static int parse_options(struct super_block *sb, char *options, struct scoutfs_mount_options *opts)
{
substring_t args[MAX_OPT_ARGS];
int nr;
@@ -98,49 +127,61 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
char *p;
int ret;

/* Set defaults */
memset(parsed, 0, sizeof(*parsed));
parsed->quorum_slot_nr = -1;

while ((p = strsep(&options, ",")) != NULL) {
if (!*p)
continue;

token = match_token(p, tokens, args);
switch (token) {
case Opt_quorum_slot_nr:

if (parsed->quorum_slot_nr != -1) {
case Opt_metadev_path:
ret = parse_bdev_path(sb, &args[0], &opts->metadev_path);
if (ret < 0)
return ret;
break;

case Opt_orphan_scan_delay_ms:
if (opts->orphan_scan_delay_ms != -1) {
scoutfs_err(sb, "multiple orphan_scan_delay_ms options provided, only provide one.");
return -EINVAL;
}

ret = match_int(args, &nr);
if (ret < 0 ||
nr < MIN_ORPHAN_SCAN_DELAY_MS || nr > MAX_ORPHAN_SCAN_DELAY_MS) {
scoutfs_err(sb, "invalid orphan_scan_delay_ms option, must be between %lu and %lu",
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
if (ret == 0)
ret = -EINVAL;
return ret;
}
opts->orphan_scan_delay_ms = nr;
break;

case Opt_quorum_slot_nr:
if (opts->quorum_slot_nr != -1) {
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
return -EINVAL;
}

ret = match_int(args, &nr);
if (ret < 0 || nr < 0 ||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
if (ret < 0 || nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
SCOUTFS_QUORUM_MAX_SLOTS - 1);
if (ret == 0)
ret = -EINVAL;
return ret;
}
parsed->quorum_slot_nr = nr;
opts->quorum_slot_nr = nr;
break;
case Opt_metadev_path:

ret = parse_bdev_path(sb, &args[0],
&parsed->metadev_path);
if (ret < 0)
return ret;
break;
default:
scoutfs_err(sb, "Unknown or malformed option, \"%s\"",
p);
break;
scoutfs_err(sb, "Unknown or malformed option, \"%s\"", p);
return -EINVAL;
}
}

if (!parsed->metadev_path) {
if (!opts->metadev_path) {
scoutfs_err(sb, "Required mount option \"metadev_path\" not found");
return -EINVAL;
}
@@ -148,40 +189,181 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
return 0;
}
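/*
 * A userspace sketch of the option walk in parse_options() above:
 * strsep() consumes the string comma by comma, empty tokens are
 * skipped, and each token is dispatched by name. The simple prefix
 * matching below stands in for the kernel's match_token().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char buf[] = "metadev_path=/dev/meta,orphan_scan_delay_ms=5000,quorum_slot_nr=0";
    char *options = buf;
    char *p;

    while ((p = strsep(&options, ",")) != NULL) {
        if (!*p)
            continue;
        if (strncmp(p, "metadev_path=", 13) == 0)
            printf("metadev: %s\n", p + 13);
        else if (strncmp(p, "orphan_scan_delay_ms=", 21) == 0)
            printf("scan delay: %ld ms\n", strtol(p + 21, NULL, 0));
        else if (strncmp(p, "quorum_slot_nr=", 15) == 0)
            printf("slot: %ld\n", strtol(p + 15, NULL, 0));
        else
            fprintf(stderr, "unknown option \"%s\"\n", p);
    }
    return 0;
}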

int scoutfs_options_setup(struct super_block *sb)
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts)
{
DECLARE_OPTIONS_INFO(sb, optinf);
unsigned int seq;

if (WARN_ON_ONCE(optinf == NULL)) {
/* trying to use options before early setup or after destroy */
init_default_options(opts);
return;
}

do {
seq = read_seqbegin(&optinf->seqlock);
memcpy(opts, &optinf->opts, sizeof(struct scoutfs_mount_options));
} while (read_seqretry(&optinf->seqlock, seq));
}
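/*
 * A userspace model of the seqlock read loop above: the snapshot is
 * retried until the sequence is even and unchanged across the copy,
 * so readers never block the writer. Illustrative only; the
 * kernel's seqlock_t implements this for real.
 */
#include <stdatomic.h>

struct seq_opts {
    atomic_uint seq; /* even: stable, odd: writer active */
    int quorum_slot_nr;
    unsigned int orphan_scan_delay_ms;
};

static void read_opts_snapshot(struct seq_opts *so, int *slot, unsigned int *delay)
{
    unsigned int seq;

    do {
        while ((seq = atomic_load(&so->seq)) & 1)
            ; /* writer in progress, spin */
        *slot = so->quorum_slot_nr;
        *delay = so->orphan_scan_delay_ms;
    } while (atomic_load(&so->seq) != seq);
}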

/*
* Early setup that parses and stores the options so that the rest of
* setup can use them. Full options setup that relies on other
* components will be done later.
*/
int scoutfs_options_early_setup(struct super_block *sb, char *options)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct options_sb_info *osi;
struct scoutfs_mount_options opts;
struct options_info *optinf;
int ret;

osi = kzalloc(sizeof(struct options_sb_info), GFP_KERNEL);
if (!osi)
return -ENOMEM;
init_default_options(&opts);

sbi->options = osi;
ret = parse_options(sb, options, &opts);
if (ret < 0)
goto out;

osi->debugfs_dir = debugfs_create_dir("options", sbi->debug_root);
if (!osi->debugfs_dir) {
optinf = kzalloc(sizeof(struct options_info), GFP_KERNEL);
if (!optinf) {
ret = -ENOMEM;
goto out;
}

seqlock_init(&optinf->seqlock);
scoutfs_sysfs_init_attrs(sb, &optinf->sysfs_attrs);

write_seqlock(&optinf->seqlock);
optinf->opts = opts;
write_sequnlock(&optinf->seqlock);

sbi->options_info = optinf;
ret = 0;
out:
if (ret)
if (ret < 0)
free_options(&opts);

return ret;
}

int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct scoutfs_mount_options opts;

scoutfs_options_read(sb, &opts);

seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
seq_printf(seq, ",orphan_scan_delay_ms=%u", opts.orphan_scan_delay_ms);
if (opts.quorum_slot_nr >= 0)
seq_printf(seq, ",quorum_slot_nr=%d", opts.quorum_slot_nr);

return 0;
}

static ssize_t metadev_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct scoutfs_mount_options opts;

scoutfs_options_read(sb, &opts);

return snprintf(buf, PAGE_SIZE, "%s", opts.metadev_path);
}
SCOUTFS_ATTR_RO(metadev_path);

static ssize_t orphan_scan_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct scoutfs_mount_options opts;

scoutfs_options_read(sb, &opts);

return snprintf(buf, PAGE_SIZE, "%u", opts.orphan_scan_delay_ms);
}
static ssize_t orphan_scan_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
DECLARE_OPTIONS_INFO(sb, optinf);
char nullterm[20]; /* more than enough for octal -U32_MAX */
long val;
int len;
int ret;

len = min(count, sizeof(nullterm) - 1);
memcpy(nullterm, buf, len);
nullterm[len] = '\0';

ret = kstrtol(nullterm, 0, &val);
if (ret < 0 || val < MIN_ORPHAN_SCAN_DELAY_MS || val > MAX_ORPHAN_SCAN_DELAY_MS) {
scoutfs_err(sb, "invalid orphan_scan_delay_ms value written to options sysfs file, must be between %lu and %lu",
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
return -EINVAL;
}

write_seqlock(&optinf->seqlock);
optinf->opts.orphan_scan_delay_ms = val;
write_sequnlock(&optinf->seqlock);

scoutfs_inode_schedule_orphan_dwork(sb);

return count;
}
SCOUTFS_ATTR_RW(orphan_scan_delay_ms);
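/*
 * The store hook above copies into a small bounded buffer so the
 * sysfs input is NUL terminated before kstrtol() parses it, then
 * rejects values outside the scan delay bounds. From userspace the
 * tunable would be driven with something like the following, where
 * the exact sysfs path is assumed for illustration:
 *
 *   echo 30000 > /sys/fs/scoutfs/<id>/mount_options/orphan_scan_delay_ms
 */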

static ssize_t quorum_slot_nr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct scoutfs_mount_options opts;

scoutfs_options_read(sb, &opts);

return snprintf(buf, PAGE_SIZE, "%d\n", opts.quorum_slot_nr);
}
SCOUTFS_ATTR_RO(quorum_slot_nr);

static struct attribute *options_attrs[] = {
SCOUTFS_ATTR_PTR(metadev_path),
SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),
SCOUTFS_ATTR_PTR(quorum_slot_nr),
NULL,
};

int scoutfs_options_setup(struct super_block *sb)
{
DECLARE_OPTIONS_INFO(sb, optinf);
int ret;

ret = scoutfs_sysfs_create_attrs(sb, &optinf->sysfs_attrs, options_attrs, "mount_options");
if (ret < 0)
scoutfs_options_destroy(sb);
return ret;
}

/*
* We remove the sysfs files early in unmount so that they can't try to call other subsystems
* as they're being destroyed.
*/
void scoutfs_options_stop(struct super_block *sb)
{
DECLARE_OPTIONS_INFO(sb, optinf);

if (optinf)
scoutfs_sysfs_destroy_attrs(sb, &optinf->sysfs_attrs);
}

void scoutfs_options_destroy(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct options_sb_info *osi = sbi->options;
DECLARE_OPTIONS_INFO(sb, optinf);

if (osi) {
if (osi->debugfs_dir)
debugfs_remove_recursive(osi->debugfs_dir);
kfree(osi);
sbi->options = NULL;
scoutfs_options_stop(sb);

if (optinf) {
free_options(&optinf->opts);
kfree(optinf);
sbi->options_info = NULL;
}
}

@@ -5,23 +5,19 @@
#include <linux/in.h>
#include "format.h"

enum scoutfs_mount_options {
Opt_quorum_slot_nr,
Opt_metadev_path,
Opt_err,
};

struct mount_options {
int quorum_slot_nr;
struct scoutfs_mount_options {
char *metadev_path;
unsigned int orphan_scan_delay_ms;
int quorum_slot_nr;

};

int scoutfs_parse_options(struct super_block *sb, char *options,
struct mount_options *parsed);
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts);
int scoutfs_options_show(struct seq_file *seq, struct dentry *root);

int scoutfs_options_early_setup(struct super_block *sb, char *options);
int scoutfs_options_setup(struct super_block *sb);
void scoutfs_options_stop(struct super_block *sb);
void scoutfs_options_destroy(struct super_block *sb);

u32 scoutfs_option_u32(struct super_block *sb, int token);
#define scoutfs_option_bool scoutfs_option_u32

#endif /* _SCOUTFS_OPTIONS_H_ */
@@ -105,6 +105,8 @@ enum quorum_role { FOLLOWER, CANDIDATE, LEADER };
struct quorum_status {
enum quorum_role role;
u64 term;
u64 server_start_term;
int server_event;
int vote_for;
unsigned long vote_bits;
ktime_t timeout;
@@ -116,7 +118,7 @@ struct quorum_info {
struct socket *sock;
bool shutdown;

unsigned long flags;
int our_quorum_slot_nr;
int votes_needed;

spinlock_t show_lock;
@@ -127,8 +129,6 @@ struct quorum_info {
struct scoutfs_sysfs_attrs ssa;
};

#define QINF_FLAG_SERVER 0

#define DECLARE_QUORUM_INFO(sb, name) \
struct quorum_info *name = SCOUTFS_SB(sb)->quorum_info
#define DECLARE_QUORUM_INFO_KOBJ(kobj, name) \
@@ -160,9 +160,7 @@ static ktime_t heartbeat_timeout(void)
static int create_socket(struct super_block *sb)
{
DECLARE_QUORUM_INFO(sb, qinf);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct mount_options *opts = &sbi->opts;
struct scoutfs_super_block *super = &sbi->super;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct socket *sock = NULL;
struct sockaddr_in sin;
int addrlen;
@@ -176,7 +174,7 @@ static int create_socket(struct super_block *sb)

sock->sk->sk_allocation = GFP_NOFS;

scoutfs_quorum_slot_sin(super, opts->quorum_slot_nr, &sin);
scoutfs_quorum_slot_sin(super, qinf->our_quorum_slot_nr, &sin);

addrlen = sizeof(sin);
ret = kernel_bind(sock, (struct sockaddr *)&sin, addrlen);
@@ -207,7 +205,6 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,
int only)
{
DECLARE_QUORUM_INFO(sb, qinf);
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
ktime_t now;
int i;
@@ -216,7 +213,7 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,
.fsid = super->hdr.fsid,
.term = cpu_to_le64(term),
.type = type,
.from = opts->quorum_slot_nr,
.from = qinf->our_quorum_slot_nr,
};
struct kvec kv = {
.iov_base = &qmes,
@@ -238,7 +235,7 @@ static void send_msg_members(struct super_block *sb, int type, u64 term,

for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
if (!quorum_slot_present(super, i) ||
(only >= 0 && i != only) || i == opts->quorum_slot_nr)
(only >= 0 && i != only) || i == qinf->our_quorum_slot_nr)
continue;

scoutfs_quorum_slot_sin(super, i, &sin);
@@ -476,8 +473,8 @@ static int write_quorum_block(struct super_block *sb, u64 blkno, struct scoutfs_
*/
static int update_quorum_block(struct super_block *sb, int event, u64 term, bool check_rid)
{
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
u64 blkno = SCOUTFS_QUORUM_BLKNO + opts->quorum_slot_nr;
DECLARE_QUORUM_INFO(sb, qinf);
u64 blkno = SCOUTFS_QUORUM_BLKNO + qinf->our_quorum_slot_nr;
struct scoutfs_quorum_block blk;
int ret;

@@ -496,16 +493,6 @@ static int update_quorum_block(struct super_block *sb, int event, u64 term, bool
return ret;
}

/*
* The calling server has fenced previous leaders and reclaimed their
* resources. We can now update our fence event with a greater term to
* stop future leaders from doing the same.
*/
int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term)
{
return update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_FENCE, term, true);
}

/*
* The calling server has been elected and has started running but can't
* yet assume that it has exclusive access to the metadata device. We
@@ -595,15 +582,9 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term)
}

out:
if (fence_started) {
err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
if (ret == 0)
ret = err;
} else {
err = scoutfs_quorum_fence_complete(sb, term);
if (ret == 0)
ret = err;
}
err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
if (ret == 0)
ret = err;

if (ret < 0)
scoutfs_inc_counter(sb, quorum_fence_error);
@@ -611,21 +592,34 @@ out:
return ret;
}

/*
* The main quorum task maintains its private status. It seemed cleaner
* to occasionally copy the status for showing in sysfs/debugfs files
* than to have the two of them lock access to shared status. The show
* copy is updated after being modified before the quorum task sleeps
* for a significant amount of time, either waiting on timeouts or
* interacting with the server.
*/
static void update_show_status(struct quorum_info *qinf, struct quorum_status *qst)
{
spin_lock(&qinf->show_lock);
qinf->show_status = *qst;
spin_unlock(&qinf->show_lock);
}
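/*
 * A small pthread sketch of the show-status publishing above: the
 * worker owns its status privately and publishes a copy under a
 * lock before it sleeps, so sysfs/debugfs readers only ever touch
 * the copy. A sketch of the pattern, not the kernel implementation.
 */
#include <pthread.h>

struct qstatus {
    int role;
    unsigned long long term;
};

struct shown_status {
    pthread_mutex_t lock;
    struct qstatus shown;
};

static void publish_status(struct shown_status *s, const struct qstatus *cur)
{
    pthread_mutex_lock(&s->lock);
    s->shown = *cur; /* observers read only this copy */
    pthread_mutex_unlock(&s->lock);
}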

/*
* The quorum work always runs in the background of quorum member
* mounts. It's responsible for starting and stopping the server if
* it's elected leader, and the server can call back into it to let it
* know that it has shut itself down (perhaps due to error) so that the
* work should stop sending heartbeats.
* it's elected leader. While it's leader it sends heartbeats to
* suppress other quorum work from standing for election.
*/
static void scoutfs_quorum_worker(struct work_struct *work)
{
struct quorum_info *qinf = container_of(work, struct quorum_info, work);
struct super_block *sb = qinf->sb;
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
struct sockaddr_in unused;
struct quorum_host_msg msg;
struct quorum_status qst;
struct quorum_status qst = {0,};
int ret;
int err;

@@ -634,9 +628,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)

/* start out as a follower */
qst.role = FOLLOWER;
qst.term = 0;
qst.vote_for = -1;
qst.vote_bits = 0;

/* read our starting term from greatest in all events in all slots */
read_greatest_term(sb, &qst.term);
@@ -654,6 +646,8 @@ static void scoutfs_quorum_worker(struct work_struct *work)

while (!(qinf->shutdown || scoutfs_forcing_unmount(sb))) {

update_show_status(qinf, &qst);

ret = recv_msg(sb, &msg, qst.timeout);
if (ret < 0) {
if (ret != -ETIMEDOUT && ret != -EAGAIN) {
@@ -670,24 +664,6 @@ static void scoutfs_quorum_worker(struct work_struct *work)
msg.term < qst.term)
msg.type = SCOUTFS_QUORUM_MSG_INVALID;

/* if the server has shutdown we become follower */
if (!test_bit(QINF_FLAG_SERVER, &qinf->flags) &&
qst.role == LEADER) {
qst.role = FOLLOWER;
qst.vote_for = -1;
qst.vote_bits = 0;
qst.timeout = election_timeout();
scoutfs_inc_counter(sb, quorum_server_shutdown);

send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
qst.term);
scoutfs_inc_counter(sb, quorum_send_resignation);
}

spin_lock(&qinf->show_lock);
qinf->show_status = qst;
spin_unlock(&qinf->show_lock);

trace_scoutfs_quorum_loop(sb, qst.role, qst.term, qst.vote_for,
qst.vote_bits,
ktime_to_timespec64(qst.timeout));
@@ -698,7 +674,6 @@ static void scoutfs_quorum_worker(struct work_struct *work)
if (qst.role == LEADER) {
scoutfs_warn(sb, "saw msg type %u from %u for term %llu while leader in term %llu, shutting down server.",
msg.type, msg.from, msg.term, qst.term);
scoutfs_server_stop(sb);
}
qst.role = FOLLOWER;
qst.term = msg.term;
@@ -720,11 +695,18 @@ static void scoutfs_quorum_worker(struct work_struct *work)
/* followers and candidates start new election on timeout */
if (qst.role != LEADER &&
ktime_after(ktime_get(), qst.timeout)) {
/* .. but only if their server has stopped */
if (!scoutfs_server_is_down(sb)) {
qst.timeout = election_timeout();
scoutfs_inc_counter(sb, quorum_candidate_server_stopping);
continue;
}

qst.role = CANDIDATE;
qst.term++;
qst.vote_for = -1;
qst.vote_bits = 0;
set_bit(opts->quorum_slot_nr, &qst.vote_bits);
set_bit(qinf->our_quorum_slot_nr, &qst.vote_bits);
send_msg_others(sb, SCOUTFS_QUORUM_MSG_REQUEST_VOTE,
qst.term);
qst.timeout = election_timeout();
@@ -761,29 +743,69 @@ static void scoutfs_quorum_worker(struct work_struct *work)
qst.term);
qst.timeout = heartbeat_interval();

update_show_status(qinf, &qst);

/* record that we've been elected before starting up server */
ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_ELECT, qst.term, true);
if (ret < 0)
goto out;

/* make very sure server is fully shut down */
scoutfs_server_stop(sb);
/* set server bit before server shutdown could clear */
set_bit(QINF_FLAG_SERVER, &qinf->flags);
qst.server_start_term = qst.term;
qst.server_event = SCOUTFS_QUORUM_EVENT_ELECT;
scoutfs_server_start(sb, qst.term);
}

ret = scoutfs_server_start(sb, qst.term);
if (ret < 0) {
clear_bit(QINF_FLAG_SERVER, &qinf->flags);
/* store our increased term */
err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, qst.term,
true);
if (err < 0) {
ret = err;
goto out;
}
ret = 0;
continue;
/*
* This leader's server is up, having finished fencing
* previous leaders. We update the fence event with the
* current term to let future leaders know that previous
* servers have been fenced.
*/
if (qst.role == LEADER && qst.server_event != SCOUTFS_QUORUM_EVENT_FENCE &&
scoutfs_server_is_up(sb)) {
ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_FENCE, qst.term, true);
if (ret < 0)
goto out;
qst.server_event = SCOUTFS_QUORUM_EVENT_FENCE;
}

/*
* Stop a running server if we're no longer leader in
* its term.
*/
if (!(qst.role == LEADER && qst.term == qst.server_start_term) &&
scoutfs_server_is_running(sb)) {
scoutfs_server_stop(sb);
}

/*
* A previously running server has stopped. The quorum
* protocol might have shut it down by changing roles or
* it might have stopped on its own, perhaps on errors.
* If we're still a leader then we become a follower and
* send resignations to encourage the next election.
* Always update the _STOP event to stop connections and
* fencing.
*/
if (qst.server_start_term > 0 && scoutfs_server_is_down(sb)) {
if (qst.role == LEADER) {
qst.role = FOLLOWER;
qst.vote_for = -1;
qst.vote_bits = 0;
qst.timeout = election_timeout();
scoutfs_inc_counter(sb, quorum_server_shutdown);

send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
qst.server_start_term);
scoutfs_inc_counter(sb, quorum_send_resignation);
}

ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP,
qst.server_start_term, true);
if (ret < 0)
goto out;

qst.server_start_term = 0;
}

/* leaders regularly send heartbeats to delay elections */
@@ -820,12 +842,19 @@ static void scoutfs_quorum_worker(struct work_struct *work)
}
}

update_show_status(qinf, &qst);

/* always try to stop a running server as we stop */
if (test_bit(QINF_FLAG_SERVER, &qinf->flags)) {
scoutfs_server_stop(sb);
scoutfs_fence_stop(sb);
send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
qst.term);
if (scoutfs_server_is_running(sb)) {
scoutfs_server_stop_wait(sb);
send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION, qst.term);

if (qst.server_start_term > 0) {
err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP,
qst.server_start_term, true);
if (err < 0 && ret == 0)
ret = err;
}
}

/* record that this slot no longer has an active quorum */
@@ -837,21 +866,6 @@ out:
}
}

/*
* The calling server has shutdown and is no longer using shared
* resources. Clear the bit so that we stop sending heartbeats and
* allow the next server to be elected. Update the stop event so that
* it won't be considered available by clients or fenced by the next
* leader.
*/
void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term)
{
DECLARE_QUORUM_INFO(sb, qinf);

clear_bit(QINF_FLAG_SERVER, &qinf->flags);
update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, term, true);
}

/*
* Clients read quorum blocks looking for the leader with a server whose
* address it can try and connect to.
@@ -954,7 +968,6 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
DECLARE_QUORUM_INFO_KOBJ(kobj, qinf);
struct mount_options *opts = &SCOUTFS_SB(qinf->sb)->opts;
struct quorum_status qst;
struct last_msg last;
struct timespec64 ts;
@@ -971,9 +984,11 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
ret = 0;

snprintf_ret(buf, size, &ret, "quorum_slot_nr %u\n",
opts->quorum_slot_nr);
qinf->our_quorum_slot_nr);
snprintf_ret(buf, size, &ret, "term %llu\n",
qst.term);
snprintf_ret(buf, size, &ret, "server_start_term %llu\n", qst.server_start_term);
snprintf_ret(buf, size, &ret, "server_event %d\n", qst.server_event);
snprintf_ret(buf, size, &ret, "role %d (%s)\n",
qst.role, role_str(qst.role));
snprintf_ret(buf, size, &ret, "vote_for %d\n",
@@ -1048,7 +1063,6 @@ static inline bool valid_ipv4_port(__be16 port)
static int verify_quorum_slots(struct super_block *sb)
{
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
char slots[(SCOUTFS_QUORUM_MAX_SLOTS * 3) + 1];
DECLARE_QUORUM_INFO(sb, qinf);
struct sockaddr_in other;
@@ -1099,7 +1113,7 @@ static int verify_quorum_slots(struct super_block *sb)
return -EINVAL;
}

if (!quorum_slot_present(super, opts->quorum_slot_nr)) {
if (!quorum_slot_present(super, qinf->our_quorum_slot_nr)) {
char *str = slots;
*str = '\0';
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
@@ -1114,7 +1128,7 @@ static int verify_quorum_slots(struct super_block *sb)
}
}
scoutfs_err(sb, "quorum_slot_nr=%u option references unused slot, must be one of the following configured slots:%s",
opts->quorum_slot_nr, slots);
qinf->our_quorum_slot_nr, slots);
return -EINVAL;
}

@@ -1137,11 +1151,12 @@ static int verify_quorum_slots(struct super_block *sb)
int scoutfs_quorum_setup(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct mount_options *opts = &sbi->opts;
struct scoutfs_mount_options opts;
struct quorum_info *qinf;
int ret;

if (opts->quorum_slot_nr < 0)
scoutfs_options_read(sb, &opts);
if (opts.quorum_slot_nr < 0)
return 0;

qinf = kzalloc(sizeof(struct quorum_info), GFP_KERNEL);
@@ -1153,6 +1168,8 @@ int scoutfs_quorum_setup(struct super_block *sb)
spin_lock_init(&qinf->show_lock);
INIT_WORK(&qinf->work, scoutfs_quorum_worker);
scoutfs_sysfs_init_attrs(sb, &qinf->ssa);
/* static for the lifetime of the mount */
qinf->our_quorum_slot_nr = opts.quorum_slot_nr;

sbi->quorum_info = qinf;
qinf->sb = sb;

@@ -2,14 +2,12 @@
#define _SCOUTFS_QUORUM_H_

int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term);

u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
struct sockaddr_in *sin);

int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term);
int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term);

int scoutfs_quorum_setup(struct super_block *sb);
void scoutfs_quorum_shutdown(struct super_block *sb);

@@ -1843,6 +1843,53 @@ DEFINE_EVENT(scoutfs_server_client_count_class, scoutfs_server_client_down,
TP_ARGS(sb, rid, nr_clients)
);

DECLARE_EVENT_CLASS(scoutfs_server_commit_users_class,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, holding)
__field(int, applying)
__field(int, nr_holders)
__field(__u32, avail_before)
__field(__u32, freed_before)
__field(int, exceeded)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->holding = !!holding;
__entry->applying = !!applying;
__entry->nr_holders = nr_holders;
__entry->avail_before = avail_before;
__entry->freed_before = freed_before;
__entry->exceeded = !!exceeded;
),
TP_printk(SCSBF" holding %u applying %u nr %u avail_before %u freed_before %u exceeded %u",
SCSB_TRACE_ARGS, __entry->holding, __entry->applying, __entry->nr_holders,
__entry->avail_before, __entry->freed_before, __entry->exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_hold,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_apply,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_start,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_end,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
);

#define slt_symbolic(mode) \
__print_symbolic(mode, \
{ SLT_CLIENT, "client" }, \
@@ -2620,9 +2667,9 @@ TRACE_EVENT(scoutfs_item_invalidate_page,

DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
int bit_nr),

TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
@@ -2630,7 +2677,6 @@ DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
__field(__u64, group_nr)
__field(unsigned int, group_total)
__field(int, bit_nr)
__field(int, bit_count)
),

TP_fast_assign(
@@ -2639,43 +2685,42 @@ DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
__entry->group_nr = group_nr;
__entry->group_total = group_total;
__entry->bit_nr = bit_nr;
__entry->bit_count = bit_count;
),

TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d bit_count %d",
TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d",
SCSB_TRACE_ARGS, __entry->grp, __entry->group_nr, __entry->group_total,
__entry->bit_nr, __entry->bit_count)
__entry->bit_nr)
);

DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_alloc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_free,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_inc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_dec,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_request,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_destroy,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
int bit_nr),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
);

TRACE_EVENT(scoutfs_omap_should_delete,

@@ -52,6 +52,41 @@
* mount will become the leader and have less trouble.
*/

/*
* Tracks all the holders and commit work that are operating on server
* commits. It synchronizes holders modifying the blocks in the commit
* and the commit work writing dirty blocks that make up a consistent
* commit. It limits the number of active holders so that they don't
* fully consume the allocation resources prepared for a commit.
*/
struct commit_users {
wait_queue_head_t waitq;
spinlock_t lock;
struct list_head holding;
struct list_head applying;
unsigned int nr_holders;
u32 avail_before;
u32 freed_before;
bool exceeded;
};

static void init_commit_users(struct commit_users *cusers)
{
memset(cusers, 0, sizeof(struct commit_users));
init_waitqueue_head(&cusers->waitq);
spin_lock_init(&cusers->lock);
INIT_LIST_HEAD(&cusers->holding);
INIT_LIST_HEAD(&cusers->applying);
}

#define TRACE_COMMIT_USERS(sb, cusers, which) \
do { \
__typeof__(cusers) _cusers = (cusers); \
trace_scoutfs_server_commit_##which(sb, !list_empty(&_cusers->holding), \
!list_empty(&_cusers->applying), _cusers->nr_holders, _cusers->avail_before, \
_cusers->freed_before, _cusers->exceeded); \
} while (0)

struct server_info {
struct super_block *sb;
spinlock_t lock;
@@ -59,9 +94,7 @@ struct server_info {

struct workqueue_struct *wq;
struct work_struct work;
int err;
bool shutting_down;
struct completion start_comp;
int status;
u64 term;
struct scoutfs_net_connection *conn;

@@ -69,8 +102,7 @@ struct server_info {
atomic64_t seq_atomic;

/* request processing coordinates shared commits */
struct rw_semaphore commit_rwsem;
struct llist_head commit_waiters;
struct commit_users cusers;
struct work_struct commit_work;

struct list_head clients;
@@ -155,87 +187,286 @@ static bool get_volopt_val(struct server_info *server, int nr, u64 *val)
return is_set;
}

struct commit_waiter {
struct completion comp;
struct llist_node node;
int ret;
enum {
SERVER_NOP = 0,
SERVER_STARTING,
SERVER_UP,
SERVER_STOPPING,
SERVER_DOWN,
};

static bool test_shutting_down(struct server_info *server)
bool scoutfs_server_is_running(struct super_block *sb)
{
smp_rmb();
return server->shutting_down;
DECLARE_SERVER_INFO(sb, server);
long was = cmpxchg(&server->status, SERVER_NOP, SERVER_NOP);

return was == SERVER_STARTING || was == SERVER_UP;
}

static void set_shutting_down(struct server_info *server, bool val)
bool scoutfs_server_is_up(struct super_block *sb)
{
server->shutting_down = val;
smp_wmb();
DECLARE_SERVER_INFO(sb, server);

return cmpxchg(&server->status, SERVER_NOP, SERVER_NOP) == SERVER_UP;
}

bool scoutfs_server_is_down(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);

return cmpxchg(&server->status, SERVER_NOP, SERVER_NOP) == SERVER_DOWN;
}

static bool server_is_stopping(struct server_info *server)
{
return cmpxchg(&server->status, SERVER_NOP, SERVER_NOP) == SERVER_STOPPING;
}

static void stop_server(struct server_info *server)
{
set_shutting_down(server, true);
wake_up(&server->waitq);
long was = cmpxchg(&server->status, SERVER_NOP, SERVER_NOP);

if ((was == SERVER_STARTING || was == SERVER_UP) &&
cmpxchg(&server->status, was, SERVER_STOPPING) == was)
wake_up(&server->waitq);
}

static void server_up(struct server_info *server)
{
cmpxchg(&server->status, SERVER_STARTING, SERVER_UP);
}

static void server_down(struct server_info *server)
{
long was = cmpxchg(&server->status, SERVER_NOP, SERVER_NOP);

if (was != SERVER_DOWN)
cmpxchg(&server->status, was, SERVER_DOWN);
}
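/*
 * The helpers above use cmpxchg(&status, SERVER_NOP, SERVER_NOP) as
 * a fully ordered atomic read: exchanging NOP for NOP can never
 * install a new state, but it always returns the current one. A
 * userspace equivalent with C11 atomics, for illustration:
 */
#include <stdatomic.h>

enum { ST_NOP, ST_STARTING, ST_UP, ST_STOPPING, ST_DOWN };

static long status_read(atomic_long *status)
{
    long expected = ST_NOP;

    /* on mismatch, expected is updated to the live value */
    atomic_compare_exchange_strong(status, &expected, ST_NOP);
    return expected;
}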

/*
* Hold the shared rwsem that lets multiple holders modify blocks in the
* current commit and prevents the commit worker from acquiring the
* exclusive write lock to write the commit.
* The per-holder allocation block use budget balances batching
* efficiency and concurrency. The larger this gets, the fewer
* concurrent server operations can be performed in one commit. Commits
* are immediately written after being dirtied so this really only
* limits immediate concurrency under load, not batching over time as
* one might expect if commits were long lived.
*
* This is exported for server components isolated in their own files
* (lock_server) and which are not called directly by the server core
* (async timeout work).
* The upper bound is determined by the server commit hold path that can
* dirty the most blocks.
*/
void scoutfs_server_hold_commit(struct super_block *sb)
#define COMMIT_HOLD_ALLOC_BUDGET 500

struct commit_hold {
struct list_head entry;
ktime_t start;
u32 avail;
u32 freed;
int ret;
bool exceeded;
};

#define COMMIT_HOLD(name) \
struct commit_hold name = { .entry = LIST_HEAD_INIT(name.entry) }

/*
* See if the currently active holders have, all together, consumed more
* allocation resources than they were allowed. We don't have
* per-holder allocation consumption tracking. The best we can do is
* flag all the current holders so that as they release we can see
* everyone involved in crossing the limit.
*/
static void check_holder_budget(struct super_block *sb, struct server_info *server,
struct commit_users *cusers)
{
static bool exceeded_once = false;
struct commit_hold *hold;
struct timespec ts;
u32 avail_used;
u32 freed_used;
u32 avail_now;
u32 freed_now;
u32 budget;

assert_spin_locked(&cusers->lock);

if (cusers->exceeded || cusers->nr_holders == 0 || exceeded_once)
return;

scoutfs_alloc_meta_remaining(&server->alloc, &avail_now, &freed_now);
avail_used = cusers->avail_before - avail_now;
freed_used = cusers->freed_before - freed_now;
budget = cusers->nr_holders * COMMIT_HOLD_ALLOC_BUDGET;
if (avail_used <= budget && freed_used <= budget)
return;

exceeded_once = true;
cusers->exceeded = cusers->nr_holders;

scoutfs_err(sb, "%u holders exceeded alloc budget av: bef %u now %u, fr: bef %u now %u",
cusers->nr_holders, cusers->avail_before, avail_now,
cusers->freed_before, freed_now);

list_for_each_entry(hold, &cusers->holding, entry) {
ts = ktime_to_timespec(hold->start);
scoutfs_err(sb, "exceeding hold start %llu.%09llu av %u fr %u",
(u64)ts.tv_sec, (u64)ts.tv_nsec, hold->avail, hold->freed);
hold->exceeded = true;
}
}

/*
* We don't have per-holder consumption. We allow commit holders as
* long as the total budget of all the holders doesn't exceed the alloc
* resources that were available.
*/
static bool commit_alloc_has_room(struct server_info *server, struct commit_users *cusers,
unsigned int more_holders)
{
u32 avail_before;
u32 freed_before;
u32 budget;

if (cusers->nr_holders > 0) {
avail_before = cusers->avail_before;
freed_before = cusers->freed_before;
} else {
scoutfs_alloc_meta_remaining(&server->alloc, &avail_before, &freed_before);
}

budget = (cusers->nr_holders + more_holders) * COMMIT_HOLD_ALLOC_BUDGET;

return avail_before >= budget && freed_before >= budget;
}
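/*
 * A standalone restatement of the room check above, using the same
 * 500-block per-holder budget: for example, 3 active holders plus a
 * request for 2 more (the new hold and the final commit work) need
 * 5 * 500 = 2500 blocks remaining in both pools. Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

#define HOLD_ALLOC_BUDGET 500U

static bool alloc_has_room(uint32_t avail_before, uint32_t freed_before,
                           unsigned int holders, unsigned int more_holders)
{
    uint32_t budget = (holders + more_holders) * HOLD_ALLOC_BUDGET;

    return avail_before >= budget && freed_before >= budget;
}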

static bool hold_commit(struct super_block *sb, struct server_info *server,
struct commit_users *cusers, struct commit_hold *hold)
{
bool held = false;

spin_lock(&cusers->lock);

TRACE_COMMIT_USERS(sb, cusers, hold);

check_holder_budget(sb, server, cusers);

/* +2 for our additional hold and then for the final commit work the server does */
if (list_empty(&cusers->applying) && commit_alloc_has_room(server, cusers, 2)) {
scoutfs_alloc_meta_remaining(&server->alloc, &hold->avail, &hold->freed);
if (cusers->nr_holders == 0) {
cusers->avail_before = hold->avail;
cusers->freed_before = hold->freed;
cusers->exceeded = false;
}
hold->exceeded = false;
hold->start = ktime_get();
list_add_tail(&hold->entry, &cusers->holding);
cusers->nr_holders++;
held = true;
}

spin_unlock(&cusers->lock);

return held;
}

/*
* Hold the server commit so that we can make a consistent change to the
* dirty blocks in the commit. The commit won't be written while we
* hold it.
*/
static void server_hold_commit(struct super_block *sb, struct commit_hold *hold)
{
DECLARE_SERVER_INFO(sb, server);
struct commit_users *cusers = &server->cusers;

BUG_ON(!list_empty(&hold->entry));

scoutfs_inc_counter(sb, server_commit_hold);

down_read(&server->commit_rwsem);
wait_event(cusers->waitq, hold_commit(sb, server, cusers, hold));
}

/*
* This is called while holding the commit and returns once the commit
* is successfully written. Many holders can all wait for all holders
* to drain before their shared commit is applied and they're all woken.
*
* It's important to realize that our commit_waiter list node might be
* serviced by a currently executing commit work that is blocked waiting
* for the holders to release the commit_rwsem. This caller can return
* from wait_for_commit() while another future commit_work is still
* queued.
*
* This could queue delayed work but we're first trying to have batching
* work by having concurrent modification line up behind a commit in
* flight. Once the commit finishes it'll unlock and hopefully everyone
* will race to make their changes and they'll all be applied by the
* next commit after that.
*/
int scoutfs_server_apply_commit(struct super_block *sb, int err)
static int server_apply_commit(struct super_block *sb, struct commit_hold *hold, int err)
{
DECLARE_SERVER_INFO(sb, server);
struct commit_waiter cw;
struct commit_users *cusers = &server->cusers;
struct timespec ts;
bool start_commit;

spin_lock(&cusers->lock);

TRACE_COMMIT_USERS(sb, cusers, apply);

check_holder_budget(sb, server, cusers);

if (hold->exceeded) {
ts = ktime_to_timespec(hold->start);
scoutfs_err(sb, "exceeding hold start %llu.%09llu stack:",
(u64)ts.tv_sec, (u64)ts.tv_nsec);
dump_stack();
}

if (err == 0) {
cw.ret = 0;
init_completion(&cw.comp);
llist_add(&cw.node, &server->commit_waiters);
scoutfs_inc_counter(sb, server_commit_queue);
list_move_tail(&hold->entry, &cusers->applying);
} else {
list_del_init(&hold->entry);
hold->ret = err;
}
cusers->nr_holders--;
start_commit = cusers->nr_holders == 0 && !list_empty(&cusers->applying);
spin_unlock(&cusers->lock);

if (start_commit)
queue_work(server->wq, &server->commit_work);
}

up_read(&server->commit_rwsem);
wait_event(cusers->waitq, list_empty_careful(&hold->entry));
smp_rmb(); /* entry load before ret */
return hold->ret;
}

if (err == 0) {
wait_for_completion(&cw.comp);
err = cw.ret;
}
/*
* Start a commit from the commit work. We should only have been queued
* while a holder is waiting to apply after all active holders have
* finished.
*/
static int commit_start(struct super_block *sb, struct commit_users *cusers)
{
int ret = 0;

return err;
/* make sure holders held off once commit started */
spin_lock(&cusers->lock);
TRACE_COMMIT_USERS(sb, cusers, start);
if (WARN_ON_ONCE(list_empty(&cusers->applying) || cusers->nr_holders != 0))
ret = -EINVAL;
spin_unlock(&cusers->lock);

return ret;
}

/*
* Finish a commit from the commit work. Give the result to all the
* holders who are waiting for the commit to be applied.
*/
static void commit_end(struct super_block *sb, struct commit_users *cusers, int ret)
{
struct commit_hold *hold;
struct commit_hold *tmp;

spin_lock(&cusers->lock);
TRACE_COMMIT_USERS(sb, cusers, end);
list_for_each_entry(hold, &cusers->applying, entry)
hold->ret = ret;
smp_wmb(); /* ret stores before list updates */
list_for_each_entry_safe(hold, tmp, &cusers->applying, entry)
list_del_init(&hold->entry);
spin_unlock(&cusers->lock);

wake_up(&cusers->waitq);
}

static void get_roots(struct super_block *sb,
@@ -297,19 +528,17 @@ static void set_roots(struct server_info *server,
* Concurrent request processing dirties blocks in a commit and makes
* the modifications persistent before replying. We'd like to batch
* these commits as much as is reasonable so that we don't degrade to a
* few IO round trips per request.
* few synchronous IOs per request.
*
* Getting that batching right is bound up in the concurrency of request
* processing so a clear way to implement the batched commits is to
* implement commits with a single pending work func like the
* processing.
* implement commits with a single pending work func.
*
* Processing paths acquire the rwsem for reading while they're making
* multiple dependent changes. When they're done and want it persistent
* they add themselves to the list of waiters and queue the commit work.
* This work runs, acquires the lock to exclude other writers, and
* performs the commit. Readers can run concurrently with these
* commits.
* Processing paths hold the commit while they're making multiple
* dependent changes. When they're done and want it persistent they
* queue the commit work. This work runs, performs the commit, and
* wakes all the applying waiters with the result. Readers can run
* concurrently with these commits.
*/
|
||||
static void scoutfs_server_commit_func(struct work_struct *work)
|
||||
{
|
||||
@@ -317,15 +546,15 @@ static void scoutfs_server_commit_func(struct work_struct *work)
|
||||
commit_work);
|
||||
struct super_block *sb = server->sb;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct commit_waiter *cw;
|
||||
struct commit_waiter *pos;
|
||||
struct llist_node *node;
|
||||
struct commit_users *cusers = &server->cusers;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_server_commit_work_enter(sb, 0, 0);
|
||||
scoutfs_inc_counter(sb, server_commit_worker);
|
||||
|
||||
down_write(&server->commit_rwsem);
|
||||
ret = commit_start(sb, cusers);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -EIO;
|
||||
@@ -402,15 +631,8 @@ static void scoutfs_server_commit_func(struct work_struct *work)
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
node = llist_del_all(&server->commit_waiters);
|
||||
commit_end(sb, cusers, ret);
|
||||
|
||||
/* waiters always wait on completion, cw could be free after complete */
|
||||
llist_for_each_entry_safe(cw, pos, node, node) {
|
||||
cw->ret = ret;
|
||||
complete(&cw->comp);
|
||||
}
|
||||
|
||||
up_write(&server->commit_rwsem);
|
||||
trace_scoutfs_server_commit_work_exit(sb, 0, ret);
|
||||
}
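
The hunks above replace the old commit_waiter/llist scheme with the holder accounting the comment describes. As a rough illustration of just the batching idea, here is a hypothetical userspace analog with invented names (pthreads standing in for the kernel primitives; the actual entry points in this change are server_hold_commit() and server_apply_commit()):

#include <pthread.h>
#include <stdbool.h>

/* one batch of holders that will share a single commit's result */
struct batch {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int nr_holders;		/* tasks still making dependent changes */
	int nr_applying;	/* tasks waiting for the shared commit */
	int result;
	bool committed;
};

static void batch_hold(struct batch *b)
{
	pthread_mutex_lock(&b->lock);
	b->nr_holders++;
	pthread_mutex_unlock(&b->lock);
}

/* the last holder commits once for everyone; all see the same result */
static int batch_apply(struct batch *b, int (*commit)(void))
{
	int ret;

	pthread_mutex_lock(&b->lock);
	b->nr_holders--;
	b->nr_applying++;
	if (b->nr_holders == 0) {
		b->result = commit();
		b->committed = true;
		pthread_cond_broadcast(&b->cond);
	} else {
		while (!b->committed)
			pthread_cond_wait(&b->cond, &b->lock);
	}
	ret = b->result;
	if (--b->nr_applying == 0)
		b->committed = false;	/* reset for the next batch */
	pthread_mutex_unlock(&b->lock);
	return ret;
}

Unlike this sketch, the kernel code runs the commit in a single work item rather than in the last caller, and must also hold off holders that arrive while a commit is in flight (the "holders held off once commit started" check in commit_start()).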

@@ -421,6 +643,7 @@ static int server_alloc_inodes(struct super_block *sb,
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct scoutfs_net_inode_alloc ial = { 0, };
COMMIT_HOLD(hold);
__le64 lecount;
u64 ino;
u64 nr;
@@ -433,7 +656,7 @@ static int server_alloc_inodes(struct super_block *sb,

memcpy(&lecount, arg, arg_len);

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

spin_lock(&sbi->next_ino_lock);
ino = le64_to_cpu(super->next_ino);
@@ -441,7 +664,7 @@ static int server_alloc_inodes(struct super_block *sb,
le64_add_cpu(&super->next_ino, nr);
spin_unlock(&sbi->next_ino_lock);

ret = scoutfs_server_apply_commit(sb, 0);
ret = server_apply_commit(sb, &hold, 0);
if (ret == 0) {
ial.ino = cpu_to_le64(ino);
ial.nr = cpu_to_le64(nr);
@@ -819,7 +1042,7 @@ static int next_log_merge_item(struct super_block *sb,
#define FINALIZE_POLL_MS (11)
#define FINALIZE_TIMEOUT_MS (MSEC_PER_SEC / 2)
static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_log_trees *lt,
u64 rid)
u64 rid, struct commit_hold *hold)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
@@ -945,13 +1168,13 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
/* wait a bit for mounts to arrive */
if (others_active) {
mutex_unlock(&server->logs_mutex);
ret = scoutfs_server_apply_commit(sb, 0);
ret = server_apply_commit(sb, hold, 0);
if (ret < 0)
err_str = "applying commit before waiting for finalized";

msleep(FINALIZE_POLL_MS);

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, hold);
mutex_lock(&server->logs_mutex);

/* done if we timed out */
@@ -1044,6 +1267,7 @@ static int server_get_log_trees(struct super_block *sb,
struct scoutfs_log_trees lt;
struct scoutfs_key key;
bool unlock_alloc = false;
COMMIT_HOLD(hold);
u64 data_zone_blocks;
char *err_str = NULL;
u64 nr;
@@ -1054,7 +1278,7 @@ static int server_get_log_trees(struct super_block *sb,
goto out;
}

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

mutex_lock(&server->logs_mutex);

@@ -1092,7 +1316,7 @@ static int server_get_log_trees(struct super_block *sb,
}

/* drops and re-acquires the mutex and commit if it has to wait */
ret = finalize_and_start_log_merge(sb, &lt, rid);
ret = finalize_and_start_log_merge(sb, &lt, rid, &hold);
if (ret < 0)
goto unlock;

@@ -1187,7 +1411,7 @@ unlock:
mutex_unlock(&server->alloc_mutex);
mutex_unlock(&server->logs_mutex);

ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
out:
if (ret < 0)
scoutfs_err(sb, "error %d getting log trees for rid %016llx: %s",
@@ -1213,6 +1437,7 @@ static int server_commit_log_trees(struct super_block *sb,
struct scoutfs_log_trees *exist;
struct scoutfs_log_trees lt;
struct scoutfs_key key;
COMMIT_HOLD(hold);
char *err_str = NULL;
bool committed = false;
int ret;
@@ -1231,7 +1456,7 @@ static int server_commit_log_trees(struct super_block *sb,
goto out;
}

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

mutex_lock(&server->logs_mutex);

@@ -1280,7 +1505,7 @@ static int server_commit_log_trees(struct super_block *sb,
unlock:
mutex_unlock(&server->logs_mutex);

ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
if (ret < 0)
scoutfs_err(sb, "server error %d committing client logs for rid %016llx: %s",
ret, rid, err_str);
@@ -1589,6 +1814,7 @@ static int server_srch_get_compact(struct super_block *sb,
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct scoutfs_srch_compact *sc = NULL;
COMMIT_HOLD(hold);
int ret;

if (arg_len != 0) {
@@ -1602,7 +1828,7 @@ static int server_srch_get_compact(struct super_block *sb,
goto out;
}

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_get_compact(sb, &server->alloc, &server->wri,
@@ -1630,7 +1856,7 @@ static int server_srch_get_compact(struct super_block *sb,
mutex_unlock(&server->srch_mutex);

apply:
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
WARN_ON_ONCE(ret < 0 && ret != -ENOENT); /* XXX leaked busy item */
out:
ret = scoutfs_net_response(sb, conn, cmd, id, ret,
@@ -1656,6 +1882,7 @@ static int server_srch_commit_compact(struct super_block *sb,
struct scoutfs_srch_compact *sc;
struct scoutfs_alloc_list_head av;
struct scoutfs_alloc_list_head fr;
COMMIT_HOLD(hold);
int ret;

if (arg_len != sizeof(struct scoutfs_srch_compact)) {
@@ -1664,7 +1891,7 @@ static int server_srch_commit_compact(struct super_block *sb,
}
sc = arg;

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_commit_compact(sb, &server->alloc, &server->wri,
@@ -1682,7 +1909,7 @@ static int server_srch_commit_compact(struct super_block *sb,
server->other_freed, &fr);
mutex_unlock(&server->alloc_mutex);
apply:
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
out:
WARN_ON(ret < 0); /* XXX leaks allocators */
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
@@ -2047,13 +2274,14 @@ static void server_log_merge_free_work(struct work_struct *work)
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_log_merge_freeing fr;
struct scoutfs_key key;
COMMIT_HOLD(hold);
char *err_str = NULL;
bool commit = false;
int ret = 0;

/* shutdown waits for us, we'll eventually see shutting_down set */
while (!server->shutting_down) {
scoutfs_server_hold_commit(sb);
while (!server_is_stopping(server)) {

server_hold_commit(sb, &hold);
mutex_lock(&server->logs_mutex);
commit = true;

@@ -2083,7 +2311,7 @@ static void server_log_merge_free_work(struct work_struct *work)

ret = scoutfs_btree_free_blocks(sb, &server->alloc,
&server->wri, &fr.key,
&fr.root, 10);
&fr.root, COMMIT_HOLD_ALLOC_BUDGET / 2);
if (ret < 0) {
err_str = "freeing log btree";
break;
@@ -2103,7 +2331,7 @@ static void server_log_merge_free_work(struct work_struct *work)
BUG_ON(ret < 0);

mutex_unlock(&server->logs_mutex);
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
commit = false;
if (ret < 0) {
err_str = "looping commit del/upd freeing item";
@@ -2113,7 +2341,7 @@ static void server_log_merge_free_work(struct work_struct *work)

if (commit) {
mutex_unlock(&server->logs_mutex);
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
if (ret < 0)
err_str = "final commit del/upd freeing item";
}
@@ -2145,6 +2373,7 @@ static int server_get_log_merge(struct super_block *sb,
struct scoutfs_key par_end;
struct scoutfs_key next_key;
struct scoutfs_key key;
COMMIT_HOLD(hold);
char *err_str = NULL;
bool ins_rng;
bool del_remain;
@@ -2158,7 +2387,7 @@ static int server_get_log_merge(struct super_block *sb,
if (arg_len != 0)
return -EINVAL;

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);
mutex_lock(&server->logs_mutex);

restart:
@@ -2401,7 +2630,7 @@ out:
}

mutex_unlock(&server->logs_mutex);
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);

return scoutfs_net_response(sb, conn, cmd, id, ret, &req, sizeof(req));
}
@@ -2425,6 +2654,7 @@ static int server_commit_log_merge(struct super_block *sb,
struct scoutfs_log_merge_status stat;
struct scoutfs_log_merge_range rng;
struct scoutfs_key key;
COMMIT_HOLD(hold);
char *err_str = NULL;
bool deleted = false;
int ret = 0;
@@ -2442,7 +2672,7 @@ static int server_commit_log_merge(struct super_block *sb,
le64_to_cpu(comp->seq),
le64_to_cpu(comp->flags));

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);
mutex_lock(&server->logs_mutex);

/* find the status of the current log merge */
@@ -2535,7 +2765,7 @@ out:
if (ret < 0 && err_str)
scoutfs_err(sb, "error %d committing log merge: %s", ret, err_str);

err = scoutfs_server_apply_commit(sb, ret);
err = server_apply_commit(sb, &hold, ret);
BUG_ON(ret < 0 && deleted); /* inconsistent */

if (ret == 0)
@@ -2655,6 +2885,7 @@ static int server_set_volopt(struct super_block *sb, struct scoutfs_net_connecti
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_volume_options *volopt;
COMMIT_HOLD(hold);
u64 opt;
u64 nr;
int ret = 0;
@@ -2672,7 +2903,7 @@ static int server_set_volopt(struct super_block *sb, struct scoutfs_net_connecti

mutex_lock(&server->volopt_mutex);

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

if (le64_to_cpu(volopt->set_bits) & SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT) {
opt = le64_to_cpu(volopt->data_alloc_zone_blocks);
@@ -2703,7 +2934,7 @@ static int server_set_volopt(struct super_block *sb, struct scoutfs_net_connecti
}

apply:
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);

write_seqcount_begin(&server->volopt_seqcount);
if (ret == 0)
@@ -2723,6 +2954,7 @@ static int server_clear_volopt(struct super_block *sb, struct scoutfs_net_connec
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_volume_options *volopt;
COMMIT_HOLD(hold);
__le64 *opt;
u64 bit;
int ret = 0;
@@ -2741,7 +2973,7 @@ static int server_clear_volopt(struct super_block *sb, struct scoutfs_net_connec

mutex_lock(&server->volopt_mutex);

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

for (i = 0, bit = 1, opt = first_valopt(&super->volopt); i < 64; i++, bit <<= 1, opt++) {
if (le64_to_cpu(volopt->set_bits) & bit) {
@@ -2750,7 +2982,7 @@ static int server_clear_volopt(struct super_block *sb, struct scoutfs_net_connec
}
}

ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);

write_seqcount_begin(&server->volopt_seqcount);
if (ret == 0)
@@ -2776,6 +3008,7 @@ static int server_resize_devices(struct super_block *sb, struct scoutfs_net_conn
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_net_resize_devices *nrd;
COMMIT_HOLD(hold);
u64 meta_tot;
u64 meta_start;
u64 meta_len;
@@ -2794,7 +3027,7 @@ static int server_resize_devices(struct super_block *sb, struct scoutfs_net_conn
meta_tot = le64_to_cpu(nrd->new_total_meta_blocks);
data_tot = le64_to_cpu(nrd->new_total_data_blocks);

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);
mutex_lock(&server->alloc_mutex);

if (meta_tot == le64_to_cpu(super->total_meta_blocks))
@@ -2856,7 +3089,7 @@ static int server_resize_devices(struct super_block *sb, struct scoutfs_net_conn
ret = 0;
unlock:
mutex_unlock(&server->alloc_mutex);
ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
out:
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
};
@@ -3180,7 +3413,7 @@ out:
 */
static void queue_farewell_work(struct server_info *server)
{
if (!test_shutting_down(server))
if (!server_is_stopping(server))
queue_work(server->wq, &server->farewell_work);
}

@@ -3210,6 +3443,7 @@ static int server_greeting(struct super_block *sb,
struct scoutfs_net_greeting *gr = arg;
struct scoutfs_net_greeting greet;
DECLARE_SERVER_INFO(sb, server);
COMMIT_HOLD(hold);
bool reconnecting;
bool first_contact;
bool farewell;
@@ -3237,12 +3471,12 @@ static int server_greeting(struct super_block *sb,
}

if (gr->server_term == 0) {
scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

ret = insert_mounted_client(sb, le64_to_cpu(gr->rid), le64_to_cpu(gr->flags),
&conn->peername);

ret = scoutfs_server_apply_commit(sb, ret);
ret = server_apply_commit(sb, &hold, ret);
queue_work(server->wq, &server->farewell_work);
if (ret < 0)
goto send_err;
@@ -3308,9 +3542,10 @@ struct farewell_request {
 */
static int reclaim_rid(struct super_block *sb, u64 rid)
{
COMMIT_HOLD(hold);
int ret;

scoutfs_server_hold_commit(sb);
server_hold_commit(sb, &hold);

/* delete mounted client last, recovery looks for it */
ret = scoutfs_lock_server_farewell(sb, rid) ?:
@@ -3320,7 +3555,7 @@ static int reclaim_rid(struct super_block *sb, u64 rid)
scoutfs_omap_remove_rid(sb, rid) ?:
delete_mounted_client(sb, rid);

return scoutfs_server_apply_commit(sb, ret);
return server_apply_commit(sb, &hold, ret);
}

/*
@@ -3693,14 +3928,14 @@ static void fence_pending_recov_worker(struct work_struct *work)
}

if (ret < 0)
scoutfs_server_abort(sb);
stop_server(server);
}

static void recovery_timeout(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);

if (!test_shutting_down(server))
if (!server_is_stopping(server))
queue_work(server->wq, &server->fence_pending_recov_work);
}

@@ -3765,7 +4000,7 @@ out:

static void queue_reclaim_work(struct server_info *server, unsigned long delay)
{
if (!test_shutting_down(server))
if (!server_is_stopping(server))
queue_delayed_work(server->wq, &server->reclaim_dwork, delay);
}

@@ -3800,7 +4035,7 @@ static void reclaim_worker(struct work_struct *work)
if (error == true) {
scoutfs_err(sb, "saw error indicator on fence request for rid %016llx, shutting down server",
rid);
scoutfs_server_abort(sb);
stop_server(server);
ret = -ESHUTDOWN;
goto out;
}
@@ -3809,7 +4044,7 @@ static void reclaim_worker(struct work_struct *work)
if (ret < 0) {
scoutfs_err(sb, "failure to reclaim fenced rid %016llx: err %d, shutting down server",
rid, ret);
scoutfs_server_abort(sb);
stop_server(server);
goto out;
}

@@ -3817,16 +4052,7 @@ static void reclaim_worker(struct work_struct *work)
scoutfs_fence_free(sb, rid);
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_ALL);

/* tell quorum we've finished fencing all previous leaders */
if (reason == SCOUTFS_FENCE_QUORUM_BLOCK_LEADER &&
!scoutfs_fence_reason_pending(sb, reason)) {
ret = scoutfs_quorum_fence_complete(sb, server->term);
if (ret < 0)
goto out;
}

ret = 0;

out:
/* queue next reclaim immediately if we're making progress */
if (ret == 0)
@@ -3842,8 +4068,8 @@ static void scoutfs_server_worker(struct work_struct *work)
struct super_block *sb = server->sb;
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct mount_options *opts = &sbi->opts;
struct scoutfs_net_connection *conn = NULL;
struct scoutfs_mount_options opts;
DECLARE_WAIT_QUEUE_HEAD(waitq);
struct sockaddr_in sin;
bool alloc_init = false;
@@ -3852,7 +4078,8 @@ static void scoutfs_server_worker(struct work_struct *work)

trace_scoutfs_server_work_enter(sb, 0, 0);

scoutfs_quorum_slot_sin(super, opts->quorum_slot_nr, &sin);
scoutfs_options_read(sb, &opts);
scoutfs_quorum_slot_sin(super, opts.quorum_slot_nr, &sin);
scoutfs_info(sb, "server starting at "SIN_FMT, SIN_ARG(&sin));

scoutfs_block_writer_init(sb, &server->wri);
@@ -3941,12 +4168,12 @@ static void scoutfs_server_worker(struct work_struct *work)
scoutfs_net_listen(sb, conn);

scoutfs_info(sb, "server ready at "SIN_FMT, SIN_ARG(&sin));
complete(&server->start_comp);
server_up(server);

queue_reclaim_work(server, 0);

/* interruptible mostly to avoid stuck messages */
wait_event_interruptible(server->waitq, test_shutting_down(server));
wait_event_interruptible(server->waitq, server_is_stopping(server));

shutdown:
scoutfs_info(sb, "server shutting down at "SIN_FMT, SIN_ARG(&sin));
@@ -3980,60 +4207,44 @@ out:
scoutfs_fence_stop(sb);
scoutfs_net_free_conn(sb, conn);

/* let quorum know that we've shutdown */
scoutfs_quorum_server_shutdown(sb, server->term);
server_down(server);

scoutfs_info(sb, "server stopped at "SIN_FMT, SIN_ARG(&sin));
trace_scoutfs_server_work_exit(sb, 0, ret);

server->err = ret;
complete(&server->start_comp);
}

/*
 * Wait for the server to successfully start. If this returns error then
 * the super block's fence_term has been set to the new server's term so
 * that it won't be fenced.
 * Start the server but don't wait for it to complete.
 */
int scoutfs_server_start(struct super_block *sb, u64 term)
void scoutfs_server_start(struct super_block *sb, u64 term)
{
DECLARE_SERVER_INFO(sb, server);

server->err = 0;
set_shutting_down(server, false);
server->term = term;
init_completion(&server->start_comp);

queue_work(server->wq, &server->work);

wait_for_completion(&server->start_comp);
return server->err;
if (cmpxchg(&server->status, SERVER_DOWN, SERVER_STARTING) == SERVER_DOWN) {
server->term = term;
queue_work(server->wq, &server->work);
}
}
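
The cmpxchg() makes starting idempotent: only the caller that observes the SERVER_DOWN to SERVER_STARTING transition queues the work, so a racing duplicate start is a no-op. The same guard in a minimal portable C11 sketch (invented names; atomic_compare_exchange_strong() standing in for the kernel's cmpxchg()):

#include <stdatomic.h>
#include <stdbool.h>

enum srv_status { SRV_DOWN, SRV_STARTING, SRV_UP };

static _Atomic int srv_status = SRV_DOWN;

/* true only for the single caller that wins the DOWN -> STARTING race */
static bool srv_try_start(void)
{
	int expected = SRV_DOWN;

	return atomic_compare_exchange_strong(&srv_status, &expected,
					      SRV_STARTING);
}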

/*
 * Start shutdown on the server but don't wait for it to finish.
 */
void scoutfs_server_abort(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);

stop_server(server);
}

/*
 * Once the server is stopped we give the caller our election info
 * which might have been modified while we were running.
 */
void scoutfs_server_stop(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);

stop_server(server);
}

cancel_work_sync(&server->work);
cancel_work_sync(&server->farewell_work);
cancel_work_sync(&server->commit_work);
cancel_work_sync(&server->log_merge_free_work);
/*
 * Start shutdown on the server and wait for it to finish.
 */
void scoutfs_server_stop_wait(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);

stop_server(server);
flush_work_sync(&server->work);
}

int scoutfs_server_setup(struct super_block *sb)
@@ -4049,8 +4260,8 @@ int scoutfs_server_setup(struct super_block *sb)
spin_lock_init(&server->lock);
init_waitqueue_head(&server->waitq);
INIT_WORK(&server->work, scoutfs_server_worker);
init_rwsem(&server->commit_rwsem);
init_llist_head(&server->commit_waiters);
server->status = SERVER_DOWN;
init_commit_users(&server->cusers);
INIT_WORK(&server->commit_work, scoutfs_server_commit_func);
INIT_LIST_HEAD(&server->clients);
spin_lock_init(&server->farewell_lock);

@@ -64,8 +64,6 @@ int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_net_lock *nl);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
struct scoutfs_key *key);
void scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);

int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
@@ -77,9 +75,12 @@ u64 scoutfs_server_seq(struct super_block *sb);
u64 scoutfs_server_next_seq(struct super_block *sb);
void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);

int scoutfs_server_start(struct super_block *sb, u64 term);
void scoutfs_server_abort(struct super_block *sb);
void scoutfs_server_start(struct super_block *sb, u64 term);
void scoutfs_server_stop(struct super_block *sb);
void scoutfs_server_stop_wait(struct super_block *sb);
bool scoutfs_server_is_running(struct super_block *sb);
bool scoutfs_server_is_up(struct super_block *sb);
bool scoutfs_server_is_down(struct super_block *sb);

int scoutfs_server_setup(struct super_block *sb);
void scoutfs_server_destroy(struct super_block *sb);

@@ -132,44 +132,6 @@ out:
return ret;
}

static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

if (opts->quorum_slot_nr >= 0)
seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
seq_printf(seq, ",metadev_path=%s", opts->metadev_path);

return 0;
}

static ssize_t metadev_path_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

return snprintf(buf, PAGE_SIZE, "%s", opts->metadev_path);
}
SCOUTFS_ATTR_RO(metadev_path);

static ssize_t quorum_server_nr_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
}
SCOUTFS_ATTR_RO(quorum_server_nr);

static struct attribute *mount_options_attrs[] = {
SCOUTFS_ATTR_PTR(metadev_path),
SCOUTFS_ATTR_PTR(quorum_server_nr),
NULL,
};

static int scoutfs_sync_fs(struct super_block *sb, int wait)
{
trace_scoutfs_sync_fs(sb, wait);
@@ -246,13 +208,11 @@ static void scoutfs_put_super(struct super_block *sb)
scoutfs_destroy_triggers(sb);
scoutfs_fence_destroy(sb);
scoutfs_options_destroy(sb);
scoutfs_sysfs_destroy_attrs(sb, &sbi->mopts_ssa);
debugfs_remove(sbi->debug_root);
scoutfs_destroy_counters(sb);
scoutfs_destroy_sysfs(sb);
scoutfs_metadev_close(sb);

kfree(sbi->opts.metadev_path);
kfree(sbi);

sb->s_fs_info = NULL;
@@ -282,7 +242,7 @@ static const struct super_operations scoutfs_super_ops = {
.destroy_inode = scoutfs_destroy_inode,
.sync_fs = scoutfs_sync_fs,
.statfs = scoutfs_statfs,
.show_options = scoutfs_show_options,
.show_options = scoutfs_options_show,
.put_super = scoutfs_put_super,
.umount_begin = scoutfs_umount_begin,
};
@@ -511,9 +471,9 @@ out:

static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct scoutfs_sb_info *sbi;
struct mount_options opts;
struct scoutfs_mount_options opts;
struct block_device *meta_bdev;
struct scoutfs_sb_info *sbi;
struct inode *inode;
int ret;

@@ -541,13 +501,12 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
spin_lock_init(&sbi->next_ino_lock);
spin_lock_init(&sbi->data_wait_root.lock);
sbi->data_wait_root.root = RB_ROOT;
scoutfs_sysfs_init_attrs(sb, &sbi->mopts_ssa);

ret = scoutfs_parse_options(sb, data, &opts);
if (ret)
goto out;

sbi->opts = opts;
/* parse options early for use during setup */
ret = scoutfs_options_early_setup(sb, data);
if (ret < 0)
return ret;
scoutfs_options_read(sb, &opts);

ret = sb_set_blocksize(sb, SCOUTFS_BLOCK_SM_SIZE);
if (ret != SCOUTFS_BLOCK_SM_SIZE) {
@@ -556,9 +515,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
goto out;
}

meta_bdev =
blkdev_get_by_path(sbi->opts.metadev_path,
SCOUTFS_META_BDEV_MODE, sb);
meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb);
if (IS_ERR(meta_bdev)) {
scoutfs_err(sb, "could not open metadev: error %ld",
PTR_ERR(meta_bdev));
@@ -578,8 +535,6 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
scoutfs_setup_sysfs(sb) ?:
scoutfs_setup_counters(sb) ?:
scoutfs_options_setup(sb) ?:
scoutfs_sysfs_create_attrs(sb, &sbi->mopts_ssa,
mount_options_attrs, "mount_options") ?:
scoutfs_setup_triggers(sb) ?:
scoutfs_fence_setup(sb) ?:
scoutfs_block_setup(sb) ?:
@@ -652,6 +607,7 @@ static void scoutfs_kill_sb(struct super_block *sb)
}

if (SCOUTFS_HAS_SBI(sb)) {
scoutfs_options_stop(sb);
scoutfs_inode_orphan_stop(sb);
scoutfs_lock_unmount_begin(sb);
}

@@ -44,6 +44,7 @@ struct scoutfs_sb_info {

spinlock_t next_ino_lock;

struct options_info *options_info;
struct data_info *data_info;
struct inode_sb_info *inode_sb_info;
struct btree_info *btree_info;
@@ -74,10 +75,6 @@ struct scoutfs_sb_info {
struct scoutfs_counters *counters;
struct scoutfs_triggers *triggers;

struct mount_options opts;
struct options_sb_info *options;
struct scoutfs_sysfs_attrs mopts_ssa;

struct dentry *debug_root;

bool forced_unmount;

@@ -37,6 +37,15 @@ struct attr_funcs {
#define ATTR_FUNCS_RO(_name) \
static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)

static ssize_t data_device_maj_min_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);

return snprintf(buf, PAGE_SIZE, "%u:%u\n",
MAJOR(sb->s_bdev->bd_dev), MINOR(sb->s_bdev->bd_dev));
}
ATTR_FUNCS_RO(data_device_maj_min);

static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
@@ -101,6 +110,7 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,

static struct attribute *sb_id_attrs[] = {
&data_device_maj_min_attr_funcs.attr,
&format_version_attr_funcs.attr,
&fsid_attr_funcs.attr,
&rid_attr_funcs.attr,

@@ -640,6 +640,7 @@ void scoutfs_shutdown_trans(struct super_block *sb)
tri->write_workq = NULL;
}

scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri);
scoutfs_block_writer_forget_all(sb, &tri->wri);

kfree(tri);
kmod/src/xattr.c | 289
@@ -57,12 +57,6 @@ static u32 xattr_names_equal(const char *a_name, unsigned int a_len,
return a_len == b_len && memcmp(a_name, b_name, a_len) == 0;
}

static unsigned int xattr_full_bytes(struct scoutfs_xattr *xat)
{
return offsetof(struct scoutfs_xattr,
name[xat->name_len + le16_to_cpu(xat->val_len)]);
}

static unsigned int xattr_nr_parts(struct scoutfs_xattr *xat)
{
return SCOUTFS_XATTR_NR_PARTS(xat->name_len,
@@ -137,12 +131,29 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
}

/*
 * Find the next xattr and copy the key, xattr header, and as much of
 * the name and value into the callers buffer as we can. Returns the
 * number of bytes copied which include the header, name, and value and
 * can be limited by the xattr length or the callers buffer. The caller
 * is responsible for comparing their lengths, the header, and the
 * returned length before safely using the xattr.
 * xattrs are stored in multiple items. The first item is a
 * concatenation of an initial header, the name, and then as much of the
 * value as fits in the remainder of the first item. This returns the
 * size of the first item that'd store an xattr with the given name
 * length and value payload size.
 */
static int first_item_bytes(int name_len, size_t size)
{
if (WARN_ON_ONCE(name_len <= 0) ||
WARN_ON_ONCE(name_len > SCOUTFS_XATTR_MAX_NAME_LEN))
return 0;

return min_t(int, sizeof(struct scoutfs_xattr) + name_len + size,
SCOUTFS_XATTR_MAX_PART_SIZE);
}
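
To make the sizing concrete, a worked example with invented numbers -- a 16 byte header and a 1024 byte SCOUTFS_XATTR_MAX_PART_SIZE are illustrative stand-ins, not the real constants:

/*
 * first_item_bytes(name_len = 20, size = 100)
 *	= min(16 + 20 + 100, 1024) = 136	(whole value fits in item 0)
 *
 * first_item_bytes(name_len = 20, size = 4000)
 *	= min(16 + 20 + 4000, 1024) = 1024	(item 0 is full; the rest of
 *						 the value spills into later
 *						 parts)
 */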

/*
 * Find the next xattr, set the caller's key, and copy as much of the
 * first item into the callers buffer as we can. Returns the number of
 * bytes copied which can include the header, name, and start of the
 * value from the first item. The caller is responsible for comparing
 * their lengths, the header, and the returned length before safely
 * using the buffer.
 *
 * If a name is provided then we'll iterate over items with a matching
 * name_hash until we find a matching name. If we don't find a matching
@@ -154,20 +165,17 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
 * Returns -ENOENT if it didn't find a next item.
 */
static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
struct scoutfs_xattr *xat, unsigned int bytes,
struct scoutfs_xattr *xat, unsigned int xat_bytes,
const char *name, unsigned int name_len,
u64 name_hash, u64 id, struct scoutfs_lock *lock)
{
struct super_block *sb = inode->i_sb;
struct scoutfs_key last;
u8 last_part;
int total;
u8 part;
int ret;

/* need to be able to see the name we're looking for */
if (WARN_ON_ONCE(name_len > 0 && bytes < offsetof(struct scoutfs_xattr,
name[name_len])))
if (WARN_ON_ONCE(name_len > 0 &&
xat_bytes < offsetof(struct scoutfs_xattr, name[name_len])))
return -EINVAL;

if (name_len)
@@ -176,26 +184,15 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
init_xattr_key(key, scoutfs_ino(inode), name_hash, id);
init_xattr_key(&last, scoutfs_ino(inode), U32_MAX, U64_MAX);

last_part = 0;
part = 0;
total = 0;

for (;;) {
key->skx_part = part;
ret = scoutfs_item_next(sb, key, &last,
(void *)xat + total, bytes - total,
lock);
if (ret < 0) {
/* XXX corruption, ran out of parts */
if (ret == -ENOENT && part > 0)
ret = -EIO;
ret = scoutfs_item_next(sb, key, &last, xat, xat_bytes, lock);
if (ret < 0)
break;
}

trace_scoutfs_xattr_get_next_key(sb, key);

/* XXX corruption */
if (key->skx_part != part) {
if (key->skx_part != 0) {
ret = -EIO;
break;
}
@@ -205,8 +202,7 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
 * the first part and if the next xattr name fits in our
 * buffer then the item must have included it.
 */
if (part == 0 &&
(ret < sizeof(struct scoutfs_xattr) ||
if ((ret < sizeof(struct scoutfs_xattr) ||
(xat->name_len <= name_len &&
ret < offsetof(struct scoutfs_xattr,
name[xat->name_len])) ||
@@ -216,7 +212,7 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
break;
}

if (part == 0 && name_len) {
if (name_len > 0) {
/* ran out of names that could match */
if (le64_to_cpu(key->skx_name_hash) != name_hash) {
ret = -ENOENT;
@@ -224,64 +220,126 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
}

/* keep looking for our name */
if (!xattr_names_equal(name, name_len,
xat->name, xat->name_len)) {
part = 0;
if (!xattr_names_equal(name, name_len, xat->name, xat->name_len)) {
le64_add_cpu(&key->skx_id, 1);
continue;
}

/* use the matching name we found */
last_part = xattr_nr_parts(xat) - 1;
}

total += ret;
if (total == bytes || part == last_part) {
/* copied as much as we could */
ret = total;
break;
}
part++;
/* found next name */
break;
}

return ret;
}

/*
 * The caller has already read and verified the xattr's first item.
 * Copy the value from the tail of the first item and from any future
 * items into the destination buffer.
 */
static int copy_xattr_value(struct super_block *sb, struct scoutfs_key *xat_key,
struct scoutfs_xattr *xat, int xat_bytes,
char *buffer, size_t size,
struct scoutfs_lock *lock)
{
struct scoutfs_key key;
size_t copied = 0;
int val_tail;
int bytes;
int ret;
int i;

/* must have first item up to value */
if (WARN_ON_ONCE(xat_bytes < sizeof(struct scoutfs_xattr)) ||
WARN_ON_ONCE(xat_bytes < offsetof(struct scoutfs_xattr, name[xat->name_len])))
return -EINVAL;

/* only ever copy up to the full value */
size = min_t(size_t, size, le16_to_cpu(xat->val_len));

/* must have full first item if caller needs value from second item */
val_tail = SCOUTFS_XATTR_MAX_PART_SIZE -
offsetof(struct scoutfs_xattr, name[xat->name_len]);
if (WARN_ON_ONCE(size > val_tail && xat_bytes != SCOUTFS_XATTR_MAX_PART_SIZE))
return -EINVAL;

/* copy from tail of first item */
bytes = min_t(unsigned int, size, val_tail);
if (bytes > 0) {
memcpy(buffer, &xat->name[xat->name_len], bytes);
copied += bytes;
}

key = *xat_key;
for (i = 1; copied < size; i++) {
key.skx_part = i;
bytes = min_t(unsigned int, size - copied, SCOUTFS_XATTR_MAX_PART_SIZE);

ret = scoutfs_item_lookup(sb, &key, buffer + copied, bytes, lock);
if (ret >= 0 && ret != bytes)
ret = -EIO;
if (ret < 0)
return ret;

copied += ret;
}

return copied;
}
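
Continuing the same invented sizes, the split between the first item's tail and the later parts works out like this:

/*
 * With a 16 byte header, 1024 byte parts, and name_len = 20:
 *	val_tail = 1024 - (16 + 20) = 988
 * so a 3000 byte value is copied as 988 bytes from the tail of item 0,
 * then 1024 bytes from part 1, then the remaining 988 bytes from part 2.
 */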

/*
 * The caller is working with items that are either in the allocated
 * first compound item or further items that are offsets into a value
 * buffer. Give them a pointer and length of the start of the item.
 */
static void xattr_item_part_buffer(void **buf, int *len, int part,
struct scoutfs_xattr *xat, unsigned int xat_bytes,
const char *value, size_t size)
{
int off;

if (part == 0) {
*buf = xat;
*len = xat_bytes;
} else {
off = (part * SCOUTFS_XATTR_MAX_PART_SIZE) -
offsetof(struct scoutfs_xattr, name[xat->name_len]);
BUG_ON(off >= size); /* calls limited by number of parts */
*buf = (void *)value + off;
*len = min_t(size_t, size - off, SCOUTFS_XATTR_MAX_PART_SIZE);
}
}
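
And the part offset math for the same illustrative example:

/*
 * For part 2: off = (2 * 1024) - (16 + 20) = 2012, so the item buffer
 * starts 2012 bytes into the value and len = min(size - 2012, 1024).
 * Part 0 instead points at the assembled header/name/value-head in xat.
 */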

/*
 * Create all the items associated with the given xattr. If this
 * returns an error it will have already cleaned up any items it created
 * before seeing the error.
 */
static int create_xattr_items(struct inode *inode, u64 id,
struct scoutfs_xattr *xat, unsigned int bytes,
static int create_xattr_items(struct inode *inode, u64 id, struct scoutfs_xattr *xat,
int xat_bytes, const char *value, size_t size, u8 new_parts,
struct scoutfs_lock *lock)
{
struct super_block *sb = inode->i_sb;
struct scoutfs_key key;
unsigned int part_bytes;
unsigned int total;
int ret;
int ret = 0;
void *buf;
int len;
int i;

init_xattr_key(&key, scoutfs_ino(inode),
xattr_name_hash(xat->name, xat->name_len), id);

total = 0;
ret = 0;
while (total < bytes) {
part_bytes = min_t(unsigned int, bytes - total,
SCOUTFS_XATTR_MAX_PART_SIZE);
for (i = 0; i < new_parts; i++) {
key.skx_part = i;
xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);

ret = scoutfs_item_create(sb, &key,
(void *)xat + total, part_bytes,
lock);
if (ret) {
ret = scoutfs_item_create(sb, &key, buf, len, lock);
if (ret < 0) {
while (key.skx_part-- > 0)
scoutfs_item_delete(sb, &key, lock);
break;
}

total += part_bytes;
key.skx_part++;
}

return ret;
@@ -329,20 +387,20 @@ out:
 * deleted items.
 */
static int change_xattr_items(struct inode *inode, u64 id,
struct scoutfs_xattr *new_xat,
unsigned int new_bytes, u8 new_parts,
u8 old_parts, struct scoutfs_lock *lock)
struct scoutfs_xattr *xat, int xat_bytes,
const char *value, size_t size,
u8 new_parts, u8 old_parts, struct scoutfs_lock *lock)
{
struct super_block *sb = inode->i_sb;
struct scoutfs_key key;
int last_created = -1;
int bytes;
int off;
void *buf;
int len;
int i;
int ret;

init_xattr_key(&key, scoutfs_ino(inode),
xattr_name_hash(new_xat->name, new_xat->name_len), id);
xattr_name_hash(xat->name, xat->name_len), id);

/* dirty existing old items */
for (i = 0; i < old_parts; i++) {
@@ -354,13 +412,10 @@ static int change_xattr_items(struct inode *inode, u64 id,

/* create any new items past the old */
for (i = old_parts; i < new_parts; i++) {
off = i * SCOUTFS_XATTR_MAX_PART_SIZE;
bytes = min_t(unsigned int, new_bytes - off,
SCOUTFS_XATTR_MAX_PART_SIZE);

key.skx_part = i;
ret = scoutfs_item_create(sb, &key, (void *)new_xat + off,
bytes, lock);
xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);

ret = scoutfs_item_create(sb, &key, buf, len, lock);
if (ret)
goto out;

@@ -369,13 +424,10 @@ static int change_xattr_items(struct inode *inode, u64 id,

/* update dirtied overlapping existing items, last partial first */
for (i = min(old_parts, new_parts) - 1; i >= 0; i--) {
off = i * SCOUTFS_XATTR_MAX_PART_SIZE;
bytes = min_t(unsigned int, new_bytes - off,
SCOUTFS_XATTR_MAX_PART_SIZE);

key.skx_part = i;
ret = scoutfs_item_update(sb, &key, (void *)new_xat + off,
bytes, lock);
xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);

ret = scoutfs_item_update(sb, &key, buf, len, lock);
/* only last partial can fail, then we unwind created */
if (ret < 0)
goto out;
@@ -412,7 +464,7 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
struct scoutfs_xattr *xat = NULL;
struct scoutfs_lock *lck = NULL;
struct scoutfs_key key;
unsigned int bytes;
unsigned int xat_bytes;
size_t name_len;
int ret;

@@ -423,9 +475,8 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
if (name_len > SCOUTFS_XATTR_MAX_NAME_LEN)
return -ENODATA;

/* only need enough for caller's name and value sizes */
bytes = sizeof(struct scoutfs_xattr) + name_len + size;
xat = __vmalloc(bytes, GFP_NOFS, PAGE_KERNEL);
xat_bytes = first_item_bytes(name_len, size);
xat = kmalloc(xat_bytes, GFP_NOFS);
if (!xat)
return -ENOMEM;

@@ -435,40 +486,32 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,

down_read(&si->xattr_rwsem);

ret = get_next_xattr(inode, &key, xat, bytes,
name, name_len, 0, 0, lck);

up_read(&si->xattr_rwsem);
scoutfs_unlock(sb, lck, SCOUTFS_LOCK_READ);
ret = get_next_xattr(inode, &key, xat, xat_bytes, name, name_len, 0, 0, lck);

if (ret < 0) {
if (ret == -ENOENT)
ret = -ENODATA;
goto out;
goto unlock;
}

/* the caller just wants to know the size */
if (size == 0) {
ret = le16_to_cpu(xat->val_len);
goto out;
goto unlock;
}

/* the caller's buffer wasn't big enough */
if (size < le16_to_cpu(xat->val_len)) {
ret = -ERANGE;
goto out;
goto unlock;
}

/* XXX corruption, the items didn't match the header */
if (ret < xattr_full_bytes(xat)) {
ret = -EIO;
goto out;
}

ret = le16_to_cpu(xat->val_len);
memcpy(buffer, &xat->name[xat->name_len], ret);
ret = copy_xattr_value(sb, &key, xat, xat_bytes, buffer, size, lck);
unlock:
up_read(&si->xattr_rwsem);
scoutfs_unlock(sb, lck, SCOUTFS_LOCK_READ);
out:
vfree(xat);
kfree(xat);
return ret;
}

@@ -596,7 +639,8 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
bool undo_totl = false;
LIST_HEAD(ind_locks);
u8 found_parts;
unsigned int bytes;
unsigned int xat_bytes_totl;
unsigned int xat_bytes;
unsigned int val_len;
u64 ind_seq;
u64 total;
@@ -629,9 +673,12 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
if (tgs.totl && ((ret = parse_totl_key(&totl_key, name, name_len)) != 0))
return ret;

bytes = sizeof(struct scoutfs_xattr) + name_len + size;
/* alloc enough to read old totl value */
xat = __vmalloc(bytes + SCOUTFS_XATTR_MAX_TOTL_U64, GFP_NOFS, PAGE_KERNEL);
/* allocate enough to always read an existing xattr's totl */
xat_bytes_totl = first_item_bytes(name_len,
max_t(size_t, size, SCOUTFS_XATTR_MAX_TOTL_U64));
/* but store partial first item that only includes the new xattr's value */
xat_bytes = first_item_bytes(name_len, size);
xat = kmalloc(xat_bytes_totl, GFP_NOFS);
if (!xat) {
ret = -ENOMEM;
goto out;
@@ -645,9 +692,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
down_write(&si->xattr_rwsem);

/* find an existing xattr to delete, including possible totl value */
ret = get_next_xattr(inode, &key, xat,
sizeof(struct scoutfs_xattr) + name_len + SCOUTFS_XATTR_MAX_TOTL_U64,
name, name_len, 0, 0, lck);
ret = get_next_xattr(inode, &key, xat, xat_bytes_totl, name, name_len, 0, 0, lck);
if (ret < 0 && ret != -ENOENT)
goto unlock;

@@ -683,7 +728,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
le64_add_cpu(&tval.total, -total);
}

/* prepare our xattr */
/* prepare the xattr header, name, and start of value in first item */
if (value) {
if (found_parts)
id = le64_to_cpu(key.skx_id);
@@ -693,7 +738,9 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
xat->val_len = cpu_to_le16(size);
memset(xat->__pad, 0, sizeof(xat->__pad));
memcpy(xat->name, name, name_len);
memcpy(&xat->name[xat->name_len], value, size);
memcpy(&xat->name[name_len], value,
min(size, SCOUTFS_XATTR_MAX_PART_SIZE -
offsetof(struct scoutfs_xattr, name[name_len])));

if (tgs.totl) {
ret = parse_totl_u64(value, size, &total);
@@ -741,14 +788,15 @@ retry:
}

if (found_parts && value)
ret = change_xattr_items(inode, id, xat, bytes,
ret = change_xattr_items(inode, id, xat, xat_bytes, value, size,
xattr_nr_parts(xat), found_parts, lck);
else if (found_parts)
ret = delete_xattr_items(inode, le64_to_cpu(key.skx_name_hash),
le64_to_cpu(key.skx_id), found_parts,
lck);
else
ret = create_xattr_items(inode, id, xat, bytes, lck);
ret = create_xattr_items(inode, id, xat, xat_bytes, value, size,
xattr_nr_parts(xat), lck);
if (ret < 0)
goto release;

@@ -778,7 +826,7 @@ unlock:
scoutfs_unlock(sb, lck, SCOUTFS_LOCK_WRITE);
scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
out:
vfree(xat);
kfree(xat);

return ret;
}
@@ -807,7 +855,7 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
struct scoutfs_xattr *xat = NULL;
struct scoutfs_lock *lck = NULL;
struct scoutfs_key key;
unsigned int bytes;
unsigned int xat_bytes;
ssize_t total = 0;
u32 name_hash = 0;
bool is_hidden;
@@ -820,8 +868,8 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
id = *id_pos;

/* need a buffer large enough for all possible names */
bytes = sizeof(struct scoutfs_xattr) + SCOUTFS_XATTR_MAX_NAME_LEN;
xat = kmalloc(bytes, GFP_NOFS);
xat_bytes = first_item_bytes(SCOUTFS_XATTR_MAX_NAME_LEN, 0);
xat = kmalloc(xat_bytes, GFP_NOFS);
if (!xat) {
ret = -ENOMEM;
goto out;
@@ -834,8 +882,7 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
down_read(&si->xattr_rwsem);

for (;;) {
ret = get_next_xattr(inode, &key, xat, bytes,
NULL, 0, name_hash, id, lck);
ret = get_next_xattr(inode, &key, xat, xat_bytes, NULL, 0, name_hash, id, lck);
if (ret < 0) {
if (ret == -ENOENT)
ret = total;
tests/.gitignore (vendored) | 1
@@ -3,6 +3,7 @@ src/createmany
src/dumb_renameat2
src/dumb_setxattr
src/handle_cat
src/handle_fsetxattr
src/bulk_create_paths
src/find_xattrs
src/stage_tmpfile

@@ -6,6 +6,7 @@ BIN := src/createmany \
src/dumb_renameat2 \
src/dumb_setxattr \
src/handle_cat \
src/handle_fsetxattr \
src/bulk_create_paths \
src/stage_tmpfile \
src/find_xattrs \
@@ -1,5 +1,18 @@
#!/usr/bin/bash

#
# This fencing script is used for testing clusters of multiple mounts on
# a single host. It finds mounts to fence by looking for their rids and
# only knows how to "fence" by using forced unmount.
#

echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"

log() {
echo "$@" > /dev/stderr
exit 1
}

echo_fail() {
echo "$@" > /dev/stderr
exit 1
@@ -7,29 +20,24 @@ echo_fail() {

rid="$SCOUTFS_FENCED_REQ_RID"

#
# Look for a local mount with the rid to fence. Typically we'll at
# least find the mount with the server that requested the fence that
# we're processing. But it's possible that mounts are unmounted
# before, or while, we're running.
#
mnts=$(findmnt -l -n -t scoutfs -o TARGET) || \
echo_fail "findmnt -t scoutfs failed" > /dev/stderr
for fs in /sys/fs/scoutfs/*; do
[ ! -d "$fs" ] && continue

for mnt in $mnts; do
mnt_rid=$(scoutfs statfs -p "$mnt" -s rid) || \
echo_fail "scoutfs statfs $mnt failed"

if [ "$mnt_rid" == "$rid" ]; then
umount -f "$mnt" || \
echo_fail "umount -f $mnt"

exit 0
fs_rid="$(cat $fs/rid)" || \
echo_fail "failed to get rid in $fs"
if [ "$fs_rid" != "$rid" ]; then
continue
fi

nr="$(cat $fs/data_device_maj_min)" || \
echo_fail "failed to get data device major:minor in $fs"

mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
echo_fail "findmnt -t scoutfs -S $nr failed"
for mnt in $mnts; do
umount -f "$mnt" || \
echo_fail "umount -f $mnt failed"
done
done

#
# If the mount doesn't exist on this host then it can't access the
# devices by definition and can be considered fenced.
#
exit 0

@@ -56,8 +56,11 @@ t_filter_dmesg()
re="$re|scoutfs .*: all clients recovered"
re="$re|scoutfs .* error: client rid.*lock recovery timed out"

# some tests mount w/o options
# we test bad devices and options
re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
re="$re|scoutfs .* error: meta_super META flag not set"
re="$re|scoutfs .* error: could not open metadev:.*"
re="$re|scoutfs .* error: Unknown or malformed option,.*"

# in debugging kernels we can slow things down a bit
re="$re|hrtimer: interrupt took .*"

@@ -75,6 +75,20 @@ t_fs_nrs()
seq 0 $((T_NR_MOUNTS - 1))
}

#
# outputs "1" if the fs number has "1" in its quorum/is_leader file.
# All other cases output 0, including the fs nr being a client which
# won't have a quorum/ dir.
#
t_fs_is_leader()
{
if [ "$(cat $(t_sysfs_path $1)/quorum/is_leader 2>/dev/null)" == "1" ]; then
echo "1"
else
echo "0"
fi
}

#
# Output the mount nr of the current server. This takes no steps to
# ensure that the server doesn't shut down and have some other mount
@@ -83,7 +97,7 @@ t_fs_nrs()
t_server_nr()
{
for i in $(t_fs_nrs); do
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "1" ]; then
if [ "$(t_fs_is_leader $i)" == "1" ]; then
echo $i
return
fi
@@ -101,7 +115,7 @@ t_server_nr()
t_first_client_nr()
{
for i in $(t_fs_nrs); do
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "0" ]; then
if [ "$(t_fs_is_leader $i)" == "0" ]; then
echo $i
return
fi
@@ -362,3 +376,49 @@ t_wait_for_leader() {
done
done
}

t_set_sysfs_mount_option() {
local nr="$1"
local name="$2"
local val="$3"
local opt="$(t_sysfs_path $nr)/mount_options/$name"

echo "$val" > "$opt"
}

t_set_all_sysfs_mount_options() {
local name="$1"
local val="$2"
local i

for i in $(t_fs_nrs); do
t_set_sysfs_mount_option $i $name $val
done
}

declare -A _saved_opts
t_save_all_sysfs_mount_options() {
local name="$1"
local ind
local opt
local i

for i in $(t_fs_nrs); do
opt="$(t_sysfs_path $i)/mount_options/$name"
ind="${name}_$i"

_saved_opts[$ind]="$(cat $opt)"
done
}

t_restore_all_sysfs_mount_options() {
local name="$1"
local ind
local i

for i in $(t_fs_nrs); do
ind="${name}_$i"

t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
done
}
tests/golden/basic-bad-mounts (new file) | 6
@@ -0,0 +1,6 @@
== prepare devices, mount point, and logs
== bad devices, bad options
== swapped devices
== both meta devices
== both data devices
== good volume, bad option and good options

tests/golden/fallocate (new file) | 3
@@ -0,0 +1,3 @@
== creating reasonably large per-mount files
== 10s of racing cold reads and fallocate nop
== cleaning up files

@@ -2,3 +2,4 @@
== unlinked and opened inodes still exist
== orphan from failed evict deletion is picked up
== orphaned inos in all mounts all deleted
== 30s of racing evict deletion, orphan scanning, and open by handle
@@ -380,13 +380,14 @@ cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
# Build a fenced config that runs scripts out of the repository rather
# than the default system directory
#
conf="$T_RESULTS/scoutfs-fencd.conf"
conf="$T_RESULTS/scoutfs-fenced.conf"
cat > $conf << EOF
SCOUTFS_FENCED_DELAY=1
SCOUTFS_FENCED_RUN=$T_TESTS/fenced-local-force-unmount.sh
SCOUTFS_FENCED_RUN_ARGS=""
SCOUTFS_FENCED_RUN_ARGS="ignored run args"
EOF
export SCOUTFS_FENCED_CONFIG_FILE="$conf"
T_FENCED_LOG="$T_RESULTS/fenced.log"

#
# Run the agent in the background, log its output, and kill it if we
@@ -394,7 +395,7 @@ export SCOUTFS_FENCED_CONFIG_FILE="$conf"
#
fenced_log()
{
	echo "[$(timestamp)] $*" >> "$T_RESULTS/fenced.stdout.log"
	echo "[$(timestamp)] $*" >> "$T_FENCED_LOG"
}
fenced_pid=""
kill_fenced()
@@ -405,7 +406,7 @@ kill_fenced()
	fi
}
trap kill_fenced EXIT
$T_UTILS/fenced/scoutfs-fenced > "$T_RESULTS/fenced.stdout.log" 2> "$T_RESULTS/fenced.stderr.log" &
$T_UTILS/fenced/scoutfs-fenced > "$T_FENCED_LOG" 2>&1 &
fenced_pid=$!
fenced_log "started fenced pid $fenced_pid in the background"


@@ -1,9 +1,11 @@
export-get-name-parent.sh
basic-block-counts.sh
basic-bad-mounts.sh
inode-items-updated.sh
simple-inode-index.sh
simple-staging.sh
simple-release-extents.sh
fallocate.sh
setattr_more.sh
offline-extent-waiting.sh
move-blocks.sh

tests/src/handle_fsetxattr.c (new file, 189 lines)
@@ -0,0 +1,189 @@
/*
 * Copyright (C) 2022 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <string.h>
#include <endian.h>
#include <time.h>
#include <linux/types.h>
#include <sys/xattr.h>

#define FILEID_SCOUTFS 0x81
#define FILEID_SCOUTFS_WITH_PARENT 0x82

struct our_handle {
	struct file_handle handle;
	/*
	 * scoutfs file handles can be ino or ino/parent. The
	 * handle_type field of struct file_handle denotes which
	 * version is in use. We only use the ino variant here.
	 */
	__le64 scoutfs_ino;
};

#define DEFAULT_NAME "user.handle_fsetxattr"
#define DEFAULT_VALUE "value"

static void exit_usage(void)
{
	printf(" -h/-? output this usage message and exit\n"
	       " -e keep trying on enoent, treat success as an error\n"
	       " -i <num> 64bit inode number for handle open, can be given multiple times\n"
	       " -m <string> scoutfs mount path string for ioctl fd\n"
	       " -n <string> optional xattr name string, defaults to \""DEFAULT_NAME"\"\n"
	       " -s <num> loop for num seconds, defaults to 0 for one iteration\n"
	       " -v <string> optional xattr value string, defaults to \""DEFAULT_VALUE"\"\n");
	exit(1);
}

int main(int argc, char **argv)
{
	struct our_handle handle;
	struct timespec ts;
	bool enoent_success_err = false;
	uint64_t seconds = 0;
	char *value = NULL;
	char *name = NULL;
	char *mnt = NULL;
	int nr_inos = 0;
	uint64_t *inos;
	uint64_t i;
	int *fds;
	int mntfd;
	int fd;
	int ret;
	int c;
	int j;

	/* can't have more inos than args */
	inos = calloc(argc, sizeof(inos[0]));
	fds = calloc(argc, sizeof(fds[0]));
	if (!inos || !fds) {
		perror("calloc");
		exit(1);
	}
	for (i = 0; i < argc; i++)
		fds[i] = -1;

	while ((c = getopt(argc, argv, "+ei:m:n:s:v:")) != -1) {
		switch (c) {
		case 'e':
			enoent_success_err = true;
			break;
		case 'i':
			inos[nr_inos] = strtoll(optarg, NULL, 0);
			nr_inos++;
			break;
		case 'm':
			mnt = strdup(optarg);
			break;
		case 'n':
			name = strdup(optarg);
			break;
		case 's':
			seconds = strtoll(optarg, NULL, 0);
			break;
		case 'v':
			value = strdup(optarg);
			break;
		case '?':
			printf("unknown argument: %c\n", optopt);
			/* fall through */
		case 'h':
			exit_usage();
		}
	}

	if (nr_inos == 0) {
		printf("specify at least one inode number with -i\n");
		exit(1);
	}

	if (!mnt) {
		printf("specify scoutfs mount path for ioctl with -m\n");
		exit(1);
	}

	if (name == NULL)
		name = DEFAULT_NAME;
	if (value == NULL)
		value = DEFAULT_VALUE;

	mntfd = open(mnt, O_RDONLY);
	if (mntfd == -1) {
		perror("opening mountpoint");
		return 1;
	}

	clock_gettime(CLOCK_REALTIME, &ts);
	seconds += ts.tv_sec;

	for (i = 0; ; i++) {
		for (j = 0; j < nr_inos; j++) {
			fd = fds[j];

			if (fd < 0) {
				handle.handle.handle_bytes = sizeof(struct our_handle);
				handle.handle.handle_type = FILEID_SCOUTFS;
				handle.scoutfs_ino = htole64(inos[j]);

				fd = open_by_handle_at(mntfd, &handle.handle, O_RDWR);
				if (fd == -1) {
					if (!enoent_success_err || errno != ENOENT) {
						perror("open_by_handle_at");
						return 1;
					}
					continue;
				}
				fds[j] = fd;
			}

			ret = fsetxattr(fd, name, value, strlen(value), 0);
			if (ret < 0) {
				perror("fsetxattr");
				return 1;
			}
		}

		if ((i % 10) == 0) {
			clock_gettime(CLOCK_REALTIME, &ts);
			if (ts.tv_sec >= seconds)
				break;
		}
	}

	if (enoent_success_err) {
		bool able = false;
		for (i = 0; i < nr_inos; i++) {
			if (fds[i] >= 0) {
				printf("was able to open ino %"PRIu64"\n", inos[i]);
				able = true;
			}
		}
		if (able)
			exit(1);
	}

	/* not bothering to close or free */
	return 0;
}
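
As exercised by the orphan scan test below, a typical invocation keeps
retrying handle opens and fsetxattr across several inodes for a couple
of seconds (a sketch; the inode numbers and mount path here are
placeholders):

	handle_fsetxattr -e -i 8675 -i 8676 -m /mnt/test.0 -s 2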

tests/tests/basic-bad-mounts.sh (new file, 36 lines)
@@ -0,0 +1,36 @@

mount_fail()
{
	local mnt=${!#}

	echo "mounting $@" >> $T_TMP.mount.out
	mount -t scoutfs "$@" >> $T_TMP.mount.out 2>&1
	if [ $? == 0 ]; then
		umount "$mnt" || t_fail "couldn't unmount"
		t_fail "bad mount succeeded"
	fi
}

echo "== prepare devices, mount point, and logs"
SCR="/mnt/scoutfs.extra"
mkdir -p "$SCR"
> $T_TMP.mount.out
scoutfs mkfs -f -Q 0,127.0.0.1,53000 "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 \
	|| t_fail "mkfs failed"

echo "== bad devices, bad options"
mount_fail -o _bad /dev/null /dev/null "$SCR"

echo "== swapped devices"
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$SCR"

echo "== both meta devices"
mount_fail -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$SCR"

echo "== both data devices"
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"

echo "== good volume, bad option and good options"
mount_fail -o _bad,metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"

t_pass

tests/tests/fallocate.sh (new file, 38 lines)
@@ -0,0 +1,38 @@

t_require_commands fallocate cat

echo "== creating reasonably large per-mount files"
for n in $(t_fs_nrs); do
	eval path="\$T_D${n}/file-$n"

	LC_ALL=C fallocate -l 128MiB "$path" || \
		t_fail "initial creating fallocate failed"
done

#
# we had lock inversions between read and fallocate; dropping
# the cache each time forces waiting for IO during the calls
# with the inverted locks held, so we have a better chance
# of the deadlock happening.
#
DURATION=10
echo "== ${DURATION}s of racing cold reads and fallocate nop"
END=$((SECONDS + DURATION))
while [ $SECONDS -le $END ]; do

	echo 3 > /proc/sys/vm/drop_caches

	for n in $(t_fs_nrs); do
		eval path="\$T_D${n}/file-$n"

		LC_ALL=C fallocate -o 0 -l 4KiB "$path" &
		cat "$path" > /dev/null &
	done

	wait || t_fail "fallocate or cat failed"
done

echo "== cleaning up files"
rm -f "$T_D0"/file-*

t_pass
@@ -45,6 +45,18 @@ check_read_write()
	fi
}

# verify that fenced ran our testing fence script
verify_fenced_run()
{
	local rids="$@"
	local rid

	for rid in $rids; do
		grep -q ".* running rid '$rid'.* args 'ignored run args'" "$T_FENCED_LOG" || \
			t_fail "fenced didn't execute RUN script for rid $rid"
	done
}

echo "== make sure all mounts can see each other"
check_read_write

@@ -62,12 +74,14 @@ done
while t_rid_is_fencing $rid; do
	sleep .5
done
verify_fenced_run $rid
t_mount $cl
check_read_write

echo "== force unmount all non-server, connection timeout, fence nop, mount"
sv=$(t_server_nr)
pattern="nonsense"
rids=""
sync
for cl in $(t_fs_nrs); do
	if [ $cl == $sv ]; then
@@ -75,6 +89,7 @@ for cl in $(t_fs_nrs); do
	fi

	rid=$(t_mount_rid $cl)
	rids="$rids $rid"
	pattern="$pattern|$rid"
	echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"

@@ -89,6 +104,7 @@ done
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
verify_fenced_run $rids
# remount all the clients
for cl in $(t_fs_nrs); do
	if [ $cl == $sv ]; then
@@ -109,11 +125,17 @@ t_wait_for_leader
while t_rid_is_fencing $rid; do
	sleep .5
done
verify_fenced_run $rid
t_mount $sv
check_read_write

echo "== force unmount everything, new server fences all previous"
sync
rids=""
# get rids before forced unmount breaks scoutfs statfs
for nr in $(t_fs_nrs); do
	rids="$rids $(t_mount_rid $nr)"
done
for nr in $(t_fs_nrs); do
	t_force_umount $nr
done
@@ -122,6 +144,7 @@ t_mount_all
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
verify_fenced_run $rids
check_read_write

t_pass

@@ -30,6 +30,13 @@ inode_exists()
	test "$?" == 0 -a "$(head -1 $T_TMP.inos.log)" == "$ino"
}

t_save_all_sysfs_mount_options orphan_scan_delay_ms
restore_delays()
{
	t_restore_all_sysfs_mount_options orphan_scan_delay_ms
}
trap restore_delays EXIT

echo "== test our inode existence function"
path="$T_D0/file"
touch "$path"
@@ -38,6 +45,7 @@ inode_exists $ino || echo "$ino didn't exist"

echo "== unlinked and opened inodes still exist"
sleep 1000000 < "$path" &
sleep .1 # wait for background sleep to run and open stdin
pid="$!"
rm -f "$path"
inode_exists $ino || echo "$ino didn't exist"
@@ -45,7 +53,8 @@ inode_exists $ino || echo "$ino didn't exist"
echo "== orphan from failed evict deletion is picked up"
# pending kill signal stops evict from getting locks and deleting
silent_kill $pid
sleep 55
t_set_sysfs_mount_option 0 orphan_scan_delay_ms 1000
sleep 5
inode_exists $ino && echo "$ino still exists"

echo "== orphaned inos in all mounts all deleted"
@@ -56,6 +65,7 @@ for nr in $(t_fs_nrs); do
	touch "$path"
	inos="$inos $(stat -c %i $path)"
	sleep 1000000 < "$path" &
	sleep .1 # wait for background sleep to run and open stdin
	pids="$pids $!"
	rm -f "$path"
done
@@ -70,9 +80,63 @@ while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
	sleep .5
done
# wait for orphan scans to run
sleep 55
t_set_all_sysfs_mount_options orphan_scan_delay_ms 1000
# also have to wait for delayed log merge work from mount
sleep 15
for ino in $inos; do
	inode_exists $ino && echo "$ino still exists"
done

RUNTIME=30
echo "== ${RUNTIME}s of racing evict deletion, orphan scanning, and open by handle"

# exclude last client mount
last=""
for nr in $(t_fs_nrs); do
	last=$nr
done

END=$((SECONDS + RUNTIME))
while [ $SECONDS -lt $END ]; do
	# hold open per-mount unlinked files
	pids=""
	ino_args=""
	for nr in $(t_fs_nrs); do
		test $nr == $last && continue

		eval path="\$T_D${nr}/racing-$nr"
		touch "$path"
		ino_args="$ino_args -i $(stat -c %i $path)"

		sleep 1000000 < "$path" &
		sleep .1 # wait for sleep to start and open input :/
		pids="$pids $!"
		rm -f "$path"
	done

	# remount excluded last client to force log merging and make orphans visible
	sync
	t_umount $last
	t_mount $last

	# get all mounts scanning orphans at high frequency
	t_set_all_sysfs_mount_options orphan_scan_delay_ms 100

	# spin having tasks in each mount trying to open/fsetxattr all inos
	for nr in $(t_fs_nrs); do
		test $nr == $last && continue

		eval path="\$T_M${nr}"
		handle_fsetxattr -e $ino_args -m "$path" -s 2 &
	done

	# trigger eviction deletion of each file in each mount
	silent_kill $pids

	wait || t_fail "handle_fsetxattr failed"

	# slow down orphan scanning for the next iteration
	t_set_all_sysfs_mount_options orphan_scan_delay_ms $(((RUNTIME * 2) * 1000))
done

t_pass

@@ -55,9 +55,21 @@ test -x "$SCOUTFS_FENCED_RUN" || \
	error_exit "SCOUTFS_FENCED_RUN '$SCOUTFS_FENCED_RUN' isn't executable"

#
# main loop watching for fence request across all filesystems
# Main loop watching for fence requests across all filesystems. The
# server can shut down without waiting for pending fence requests to
# finish. All of the interaction with the fence directory and files can
# fail at any moment. We will generate log messages when the dir or
# files disappear.
#

# generate failure messages to stderr while still echoing 0 for the caller
careful_cat()
{
	local path="$@"

	cat "$@" || echo 0
}

while sleep $SCOUTFS_FENCED_DELAY; do
	for fence in /sys/fs/scoutfs/*/fence/*; do
		# catches unmatched regex when no dirs
@@ -66,7 +78,8 @@ while sleep $SCOUTFS_FENCED_DELAY; do
		fi

		# skip requests that have been handled
		if [ $(cat "$fence/fenced") == 1 -o $(cat "$fence/error") == 1 ]; then
		if [ "$(careful_cat $fence/fenced)" == 1 -o \
		     "$(careful_cat $fence/error)" == 1 ]; then
			continue
		fi

@@ -81,10 +94,10 @@ while sleep $SCOUTFS_FENCED_DELAY; do
		export SCOUTFS_FENCED_REQ_RID="$rid"
		export SCOUTFS_FENCED_REQ_IP="$ip"

		$run $SCOUTFS_FENCED_RUN_ARGS
		$SCOUTFS_FENCED_RUN $SCOUTFS_FENCED_RUN_ARGS
		rc=$?
		if [ "$rc" != 0 ]; then
			log_message "server $srv fencing rid $rid saw error status $rc from $run"
			log_message "server $srv fencing rid $rid saw error status $rc"
			echo 1 > "$fence/error"
			continue
		fi

@@ -21,6 +21,21 @@ contains the filesystem's metadata.
.sp
This option is required.
.TP
.B orphan_scan_delay_ms=<number>
This option sets the average expected delay, in milliseconds, between
each mount's scan of the global orphaned inode list. Jitter is added to
avoid contention, so each individual delay between scans is a random
value up to 20% above or below this average.
.sp
The minimum value for this option is 100ms, which is very short and
only reasonable for testing or experiments. The default is 10000ms (10
seconds) and the maximum is 60000ms (1 minute).
.sp
This option can be changed in an active mount by writing to its file in
the options directory in the mount's sysfs directory. Writing a new
value causes the next pending orphan scan to be rescheduled with the
newly written delay time.
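.sp
For example (a sketch; the per-mount directory name under
/sys/fs/scoutfs varies by filesystem and mount), the delay could be
lowered at runtime with:
.sp
.nf
echo 1000 > /sys/fs/scoutfs/<fsid>/mount_options/orphan_scan_delay_ms
.fi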
.TP
.B quorum_slot_nr=<number>
The quorum_slot_nr option assigns a quorum member slot to the mount.
The mount will use the slot assignment to claim exclusive ownership of

@@ -15,7 +15,7 @@ environment variable. If that variable is also absent the current working
directory will be used.

.TP
.BI "change-format-version [-V, --format-version VERS] [-F|--offline META-DEVICE DATA-DEVICE]"
.BI "change-format-version [-V, --format-version VERS] [-F|--offline] META-DEVICE DATA-DEVICE"
.sp
Change the format version of an existing file system. The maximum
supported version is used by default. A specific version in the range
@@ -25,7 +25,7 @@ output of --help.
.PD 0
.TP
.sp
.B "-F, --offline META-DEVICE DATA-DEVICE"
.B "-F, --offline"
Change the format version by writing directly to the metadata and data
devices. Like mkfs, this writes directly to the devices without
protection and must only be used on completely unmounted devices. The
@@ -43,7 +43,7 @@ the super blocks on both devices.
.PD

.TP
.BI "change-quorum-config {-Q|--quorum-slot} NR,ADDR,PORT [-F|--offline META-DEVICE DATA-DEVICE]"
.BI "change-quorum-config {-Q|--quorum-slot NR,ADDR,PORT} [-F|--offline] META-DEVICE"
.sp
Change the quorum configuration for an existing file system. The new
configuration completely replaces the old configuration. Any slots
@@ -61,7 +61,7 @@ multiple arguments as described in the
.B mkfs
command.
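.sp
For instance, a sketch of an offline invocation following the synopsis
above (the slot address and device path are placeholders):
.sp
.nf
scoutfs change-quorum-config -Q 0,10.0.0.1,53000 --offline /dev/meta
.fi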
.TP
.B "-F, --offline META-DEVICE"
.B "-F, --offline"
Perform the change offline by updating the superblock in the metadata
device. The command will read the super block and refuse to make the
change if it sees any evidence that the metadata device is currently in

@@ -222,7 +222,7 @@ static struct argp_option options[] = {
static struct argp argp = {
	options,
	parse_opt,
	"",
	"META-DEVICE DATA-DEVICE",
	"Change format version of an existing ScoutFS filesystem"
};


@@ -147,7 +147,7 @@ static struct argp_option options[] = {
static struct argp argp = {
	options,
	parse_opt,
	"",
	"META-DEVICE",
	"Change quorum slots and addresses of an existing ScoutFS filesystem"
};