Mirror of https://github.com/versity/scoutfs.git
Synced 2026-01-16 00:12:55 +00:00

Compare commits: zab/test_l ... main (44 commits)
Commits:

7ca789c837  4d55fe6251  8f896d9783  e54f8d3ec0  d89e16214d
b468352254  0eb9dfebdc  f5750de244  f0c7996612  5143927e07
f495f52ec9  3dafeaac5b  ef0f6f8ac2  c0cd29aa1b  50bff13f21
de70ca2372  5af1412d5f  0a2b2ad409  6c4590a8a0  1768f69c3c
dcb0fd5805  660f874488  e1a6689a9b  2884a92408  e194714004
8bb2f83cf9  6a9a6789d5  ee630b164f  1c7678b6f5  22b5e79bbd
259e639271  4d66c38c71  7ef62894bd  1f363a1ead  8ddf9b8c8c
fd80c17ab6  991e2cbdf8  92ac132873  ad078cd93c  90cb458cd5
1ab798e7eb  e182914e51  8484a58dd6  a077104531
@@ -1,6 +1,41 @@
Versity ScoutFS Release Notes
=============================

---
v1.26
*Nov 17, 2025*

Add the ino_alloc_per_lock mount option. This changes the number of
inode numbers allocated under each cluster lock and can alleviate lock
contention for some patterns of larger file creation.

Add the tcp_keepalive_timeout_ms mount option. This can enable the
system to survive longer periods of networking outages.

Fix a rare double free of internal btree metadata blocks when merging
log trees. The duplicated freed metadata block numbers would cause
persistent errors in the server, preventing the server from starting
and hanging the system.

Fix the data_wait interface to not require the correct data_version of
the inode when raising an error. This lets callers raise errors when
they're unable to recall the details of the inode to discover its
data_version.

Change scoutfs to more aggressively reclaim cached memory when under
memory pressure. This makes scoutfs behave more like other kernel
components and integrates better with the reclaim policy heuristics in
the VM core of the kernel.

Change scoutfs to more efficiently transmit and receive socket
messages. Under heavy load this can process messages quickly enough to
avoid hung task messages for tasks that were waiting for cluster lock
messages to be processed.

Fix faulty server block commit budget calculations that were generating
spurious "holders exceeded alloc budget" console messages.

---
v1.25
@@ -125,7 +125,6 @@
        EXPAND_COUNTER(item_update) \
        EXPAND_COUNTER(item_write_dirty) \
        EXPAND_COUNTER(lock_alloc) \
        EXPAND_COUNTER(lock_count_objects) \
        EXPAND_COUNTER(lock_free) \
        EXPAND_COUNTER(lock_grant_request) \
        EXPAND_COUNTER(lock_grant_response) \
@@ -139,13 +138,13 @@
        EXPAND_COUNTER(lock_lock_error) \
        EXPAND_COUNTER(lock_nonblock_eagain) \
        EXPAND_COUNTER(lock_recover_request) \
        EXPAND_COUNTER(lock_scan_objects) \
        EXPAND_COUNTER(lock_shrink_attempted) \
        EXPAND_COUNTER(lock_shrink_aborted) \
        EXPAND_COUNTER(lock_shrink_work) \
        EXPAND_COUNTER(lock_shrink_request_failed) \
        EXPAND_COUNTER(lock_unlock) \
        EXPAND_COUNTER(lock_wait) \
        EXPAND_COUNTER(log_merge_complete) \
        EXPAND_COUNTER(log_merge_no_finalized) \
        EXPAND_COUNTER(log_merge_start) \
        EXPAND_COUNTER(log_merge_wait_timeout) \
        EXPAND_COUNTER(net_dropped_response) \
        EXPAND_COUNTER(net_send_bytes) \
@@ -160,6 +159,7 @@
        EXPAND_COUNTER(orphan_scan) \
        EXPAND_COUNTER(orphan_scan_attempts) \
        EXPAND_COUNTER(orphan_scan_cached) \
        EXPAND_COUNTER(orphan_scan_empty) \
        EXPAND_COUNTER(orphan_scan_error) \
        EXPAND_COUNTER(orphan_scan_item) \
        EXPAND_COUNTER(orphan_scan_omap_set) \
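The counter list above is an X-macro: each EXPAND_COUNTER(name) entry is expanded under different temporary definitions to generate both storage and printable names from a single list. A minimal sketch of the pattern with hypothetical macro and type names (scoutfs's real expansion sites may differ):

        /* single source list; one X-macro entry per counter */
        #define EXPAND_EACH_COUNTER \
                EXPAND_COUNTER(lock_alloc) \
                EXPAND_COUNTER(lock_free)

        /* first expansion: a field per counter */
        struct example_counters {
        #define EXPAND_COUNTER(name) unsigned long long name;
                EXPAND_EACH_COUNTER
        #undef EXPAND_COUNTER
        };

        /* second expansion: a printable name per counter */
        static const char *example_counter_names[] = {
        #define EXPAND_COUNTER(name) #name,
                EXPAND_EACH_COUNTER
        #undef EXPAND_COUNTER
        };

Adding or removing a counter is then a one-line change to the list, which is why the diff above only touches EXPAND_COUNTER lines.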
@@ -1482,12 +1482,6 @@ static int remove_index_items(struct super_block *sb, u64 ino,
 * Return an allocated and unused inode number. Returns -ENOSPC if
 * we're out of inode.
 *
 * Each parent directory has its own pool of free inode numbers. Items
 * are sorted by their inode numbers as they're stored in segments.
 * This will tend to group together files that are created in a
 * directory at the same time in segments. Concurrent creation across
 * different directories will be stored in their own regions.
 *
 * Inode numbers are never reclaimed. If the inode is evicted or we're
 * unmounted the pending inode numbers will be lost. Asking for a
 * relatively small number from the server each time will tend to
@@ -1497,12 +1491,18 @@ static int remove_index_items(struct super_block *sb, u64 ino,
int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret)
{
        DECLARE_INODE_SB_INFO(sb, inf);
        struct scoutfs_mount_options opts;
        struct inode_allocator *ia;
        u64 ino;
        u64 nr;
        int ret;

        ia = is_dir ? &inf->dir_ino_alloc : &inf->ino_alloc;
        scoutfs_options_read(sb, &opts);

        if (is_dir && opts.ino_alloc_per_lock == SCOUTFS_LOCK_INODE_GROUP_NR)
                ia = &inf->dir_ino_alloc;
        else
                ia = &inf->ino_alloc;

        spin_lock(&ia->lock);

@@ -1523,6 +1523,17 @@ int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret)
        *ino_ret = ia->ino++;
        ia->nr--;

        if (opts.ino_alloc_per_lock != SCOUTFS_LOCK_INODE_GROUP_NR) {
                nr = ia->ino & SCOUTFS_LOCK_INODE_GROUP_MASK;
                if (nr >= opts.ino_alloc_per_lock) {
                        nr = SCOUTFS_LOCK_INODE_GROUP_NR - nr;
                        if (nr > ia->nr)
                                nr = ia->nr;
                        ia->ino += nr;
                        ia->nr -= nr;
                }
        }

        spin_unlock(&ia->lock);
        ret = 0;
out:
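With the new option set below the group size, scoutfs_alloc_ino() hands out at most ino_alloc_per_lock inode numbers from the front of each lock group and then jumps the allocator to the next group boundary, so concurrent creators land under different cluster locks sooner. A standalone userspace sketch of the same arithmetic (the group size here is illustrative, not scoutfs's actual constant):

        #include <stdio.h>

        #define GROUP_NR   8ULL          /* illustrative lock group size */
        #define GROUP_MASK (GROUP_NR - 1)

        int main(void)
        {
                unsigned long long ino = 0, nr = 12, per_lock = 2, off, skip;

                while (nr) {
                        printf("allocated %llu\n", ino++);
                        nr--;

                        /* offset of the next ino within its lock group */
                        off = ino & GROUP_MASK;
                        if (off >= per_lock) {
                                /* skip the rest of the group, bounded by what we hold */
                                skip = GROUP_NR - off;
                                if (skip > nr)
                                        skip = nr;
                                ino += skip;
                                nr -= skip;
                        }
                }
                return 0;       /* prints 0, 1, 8, 9 with these values */
        }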
@@ -1626,10 +1637,14 @@ int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_
                                struct scoutfs_lock *primary)
{
        struct scoutfs_key key;
        int ret;

        init_orphan_key(&key, ino);

        return scoutfs_item_delete_force(sb, &key, lock, primary);
        ret = scoutfs_item_delete_force(sb, &key, lock, primary);
        trace_scoutfs_inode_orphan_delete(sb, ino, ret);

        return ret;
}

/*
@@ -1711,6 +1726,8 @@ out:
        scoutfs_release_trans(sb);
        scoutfs_inode_index_unlock(sb, &ind_locks);

        trace_scoutfs_delete_inode_end(sb, ino, mode, size, ret);

        return ret;
}

@@ -1806,6 +1823,9 @@ out:
 * they've checked that the inode could really be deleted. We serialize
 * on a bit in the lock data so that we only have one deletion attempt
 * per inode under this mount's cluster lock.
 *
 * Returns -EAGAIN if we either did some cleanup work or are unable to finish
 * cleaning up this inode right now.
 */
static int try_delete_inode_items(struct super_block *sb, u64 ino)
{
@@ -1819,6 +1839,8 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
        int bit_nr;
        int ret;

        trace_scoutfs_try_delete(sb, ino);

        ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &lock);
        if (ret < 0)
                goto out;
@@ -1831,27 +1853,32 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)

        /* only one local attempt per inode at a time */
        if (test_and_set_bit(bit_nr, ldata->trying)) {
                ret = 0;
                trace_scoutfs_try_delete_local_busy(sb, ino);
                ret = -EAGAIN;
                goto out;
        }
        clear_trying = true;

        /* can't delete if it's cached in local or remote mounts */
        if (scoutfs_omap_test(sb, ino) || test_bit_le(bit_nr, ldata->map.bits)) {
                ret = 0;
                trace_scoutfs_try_delete_cached(sb, ino);
                ret = -EAGAIN;
                goto out;
        }

        scoutfs_inode_init_key(&key, ino);
        ret = lookup_inode_item(sb, &key, &sinode, lock);
        if (ret < 0) {
                if (ret == -ENOENT)
                if (ret == -ENOENT) {
                        trace_scoutfs_try_delete_no_item(sb, ino);
                        ret = 0;
                }
                goto out;
        }

        if (le32_to_cpu(sinode.nlink) > 0) {
                ret = 0;
                trace_scoutfs_try_delete_has_links(sb, ino, le32_to_cpu(sinode.nlink));
                ret = -EAGAIN;
                goto out;
        }

@@ -1860,8 +1887,10 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
                goto out;

        ret = delete_inode_items(sb, ino, &sinode, lock, orph_lock);
        if (ret == 0)
        if (ret == 0) {
                ret = -EAGAIN;
                scoutfs_inc_counter(sb, inode_deleted);
        }

out:
        if (clear_trying)
@@ -2063,6 +2092,10 @@ void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb)
 * a locally cached inode. Then we ask the server for the open map
 * containing the inode. Only if we don't see any cached users do we do
 * the expensive work of acquiring locks to try and delete the items.
 *
 * We need to track whether there is any orphan cleanup work remaining so
 * that tests such as inode-deletion can watch the orphan_scan_empty counter
 * to determine when inode cleanup from open-unlink scenarios is complete.
 */
static void inode_orphan_scan_worker(struct work_struct *work)
{
@@ -2074,11 +2107,14 @@ static void inode_orphan_scan_worker(struct work_struct *work)
        SCOUTFS_BTREE_ITEM_REF(iref);
        struct scoutfs_key last;
        struct scoutfs_key key;
        bool work_todo = false;
        u64 group_nr;
        int bit_nr;
        u64 ino;
        int ret;

        trace_scoutfs_orphan_scan_start(sb);

        scoutfs_inc_counter(sb, orphan_scan);

        init_orphan_key(&last, U64_MAX);
@@ -2098,8 +2134,10 @@ static void inode_orphan_scan_worker(struct work_struct *work)
                init_orphan_key(&key, ino);
                ret = scoutfs_btree_next(sb, &roots.fs_root, &key, &iref);
                if (ret < 0) {
                        if (ret == -ENOENT)
                        if (ret == -ENOENT) {
                                trace_scoutfs_orphan_scan_work(sb, 0);
                                break;
                        }
                        goto out;
                }

@@ -2114,6 +2152,7 @@ static void inode_orphan_scan_worker(struct work_struct *work)

                /* locally cached inodes will try to delete as they evict */
                if (scoutfs_omap_test(sb, ino)) {
                        work_todo = true;
                        scoutfs_inc_counter(sb, orphan_scan_cached);
                        continue;
                }
@@ -2129,13 +2168,22 @@ static void inode_orphan_scan_worker(struct work_struct *work)

                /* remote cached inodes will also try to delete */
                if (test_bit_le(bit_nr, omap.bits)) {
                        work_todo = true;
                        scoutfs_inc_counter(sb, orphan_scan_omap_set);
                        continue;
                }

                /* seemingly orphaned and unused, get locks and check for sure */
                scoutfs_inc_counter(sb, orphan_scan_attempts);
                trace_scoutfs_orphan_scan_work(sb, ino);

                ret = try_delete_inode_items(sb, ino);
                if (ret == -EAGAIN) {
                        work_todo = true;
                        ret = 0;
                }

                trace_scoutfs_orphan_scan_end(sb, ino, ret);
        }

        ret = 0;
@@ -2144,6 +2192,11 @@ out:
        if (ret < 0)
                scoutfs_inc_counter(sb, orphan_scan_error);

        if (!work_todo)
                scoutfs_inc_counter(sb, orphan_scan_empty);

        trace_scoutfs_orphan_scan_stop(sb, work_todo);

        scoutfs_inode_schedule_orphan_dwork(sb);
}
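The worker finishes by calling scoutfs_inode_schedule_orphan_dwork() on itself, the usual self-rearming delayed-work shape. A minimal sketch of that shape with hypothetical names (scoutfs's scheduling helper also consults the orphan_scan_delay_ms option):

        #include <linux/module.h>
        #include <linux/workqueue.h>

        static struct delayed_work scan_dwork;          /* hypothetical */

        static void scan_worker(struct work_struct *work)
        {
                /* ... one scan pass, noting whether work remains ... */

                /* re-arm so scanning continues periodically */
                schedule_delayed_work(&scan_dwork, HZ);
        }

        static int __init scan_example_init(void)
        {
                INIT_DELAYED_WORK(&scan_dwork, scan_worker);
                schedule_delayed_work(&scan_dwork, HZ);
                return 0;
        }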
kmod/src/lock.c (248 changed lines)
@@ -53,8 +53,10 @@
 * all access to the lock (by revoking it down to a null mode) then the
 * lock is freed.
 *
 * Memory pressure on the client can cause the client to request a null
 * mode from the server so that once its granted the lock can be freed.
 * Each client has a configurable number of locks that are allowed to
 * remain idle after being granted, for use by future tasks. Past the
 * limit locks are freed by requesting a null mode from the server,
 * governed by a LRU.
 *
 * So far we've only needed a minimal trylock. We return -EAGAIN if a
 * lock attempt can't immediately match an existing granted lock. This
@@ -79,14 +81,11 @@ struct lock_info {
        bool unmounting;
        struct rb_root lock_tree;
        struct rb_root lock_range_tree;
        KC_DEFINE_SHRINKER(shrinker);
        u64 nr_locks;
        struct list_head lru_list;
        unsigned long long lru_nr;
        struct workqueue_struct *workq;
        struct work_struct inv_work;
        struct list_head inv_list;
        struct work_struct shrink_work;
        struct list_head shrink_list;
        atomic64_t next_refresh_gen;

        struct dentry *tseq_dentry;
@@ -249,7 +248,6 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
        BUG_ON(!RB_EMPTY_NODE(&lock->range_node));
        BUG_ON(!list_empty(&lock->lru_head));
        BUG_ON(!list_empty(&lock->inv_head));
        BUG_ON(!list_empty(&lock->shrink_head));
        BUG_ON(!list_empty(&lock->cov_list));

        kfree(lock->inode_deletion_data);
@@ -277,7 +275,6 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
        INIT_LIST_HEAD(&lock->lru_head);
        INIT_LIST_HEAD(&lock->inv_head);
        INIT_LIST_HEAD(&lock->inv_list);
        INIT_LIST_HEAD(&lock->shrink_head);
        spin_lock_init(&lock->cov_list_lock);
        INIT_LIST_HEAD(&lock->cov_list);

@@ -410,6 +407,7 @@ static bool lock_insert(struct super_block *sb, struct scoutfs_lock *ins)
        rb_link_node(&ins->node, parent, node);
        rb_insert_color(&ins->node, &linfo->lock_tree);

        linfo->nr_locks++;
        scoutfs_tseq_add(&linfo->tseq_tree, &ins->tseq_entry);

        return true;
@@ -424,6 +422,7 @@ static void lock_remove(struct lock_info *linfo, struct scoutfs_lock *lock)
        rb_erase(&lock->range_node, &linfo->lock_range_tree);
        RB_CLEAR_NODE(&lock->range_node);

        linfo->nr_locks--;
        scoutfs_tseq_del(&linfo->tseq_tree, &lock->tseq_entry);
}

@@ -463,10 +462,8 @@ static void __lock_del_lru(struct lock_info *linfo, struct scoutfs_lock *lock)
{
        assert_spin_locked(&linfo->lock);

        if (!list_empty(&lock->lru_head)) {
        if (!list_empty(&lock->lru_head))
                list_del_init(&lock->lru_head);
                linfo->lru_nr--;
        }
}

/*
@@ -525,14 +522,16 @@ static struct scoutfs_lock *create_lock(struct super_block *sb,
 * indicate that the lock wasn't idle. If it really is idle then we
 * either free it if it's null or put it back on the lru.
 */
static void put_lock(struct lock_info *linfo,struct scoutfs_lock *lock)
static void __put_lock(struct lock_info *linfo, struct scoutfs_lock *lock, bool tail)
{
        assert_spin_locked(&linfo->lock);

        if (lock_idle(lock)) {
                if (lock->mode != SCOUTFS_LOCK_NULL) {
                        list_add_tail(&lock->lru_head, &linfo->lru_list);
                        linfo->lru_nr++;
                        if (tail)
                                list_add_tail(&lock->lru_head, &linfo->lru_list);
                        else
                                list_add(&lock->lru_head, &linfo->lru_list);
                } else {
                        lock_remove(linfo, lock);
                        lock_free(linfo, lock);
@@ -540,6 +539,11 @@ static void put_lock(struct lock_info *linfo,struct scoutfs_lock *lock)
        }
}

static inline void put_lock(struct lock_info *linfo, struct scoutfs_lock *lock)
{
        __put_lock(linfo, lock, true);
}

/*
 * The caller has made a change (set a lock mode) which can let one of the
 * invalidating locks make forward progress.
@@ -713,14 +717,14 @@ static void lock_invalidate_worker(struct work_struct *work)
        /* only lock protocol, inv can't call subsystems after shutdown */
        if (!linfo->shutdown) {
                ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
                BUG_ON(ret);
                BUG_ON(ret < 0 && ret != -ENOLINK);
        }

        /* respond with the key and modes from the request, server might have died */
        ret = scoutfs_client_lock_response(sb, ireq->net_id, nl);
        if (ret == -ENOTCONN)
                ret = 0;
        BUG_ON(ret);
        BUG_ON(ret < 0 && ret != -ENOLINK);

        scoutfs_inc_counter(sb, lock_invalidate_response);
}
@@ -875,6 +879,69 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
        return ret;
}

/*
 * This is called on every _lock call to try and keep the number of
 * locks under the idle count. We're intentionally trying to throttle
 * shrinking bursts by tying its frequency to lock use. It will only
 * send requests to free unused locks, though, so it's always possible
 * to exceed the high water mark under heavy load.
 *
 * We send a null request and the lock will be freed by the response
 * once all users drain. If this races with invalidation then the
 * server will only send the grant response once the invalidation is
 * finished.
 */
static bool try_shrink_lock(struct super_block *sb, struct lock_info *linfo, bool force)
{
        struct scoutfs_mount_options opts;
        struct scoutfs_lock *lock = NULL;
        struct scoutfs_net_lock nl;
        int ret = 0;

        scoutfs_options_read(sb, &opts);

        /* avoiding lock contention with unsynchronized test, don't mind temp false results */
        if (!force && (list_empty(&linfo->lru_list) ||
                       READ_ONCE(linfo->nr_locks) <= opts.lock_idle_count))
                return false;

        spin_lock(&linfo->lock);

        lock = list_first_entry_or_null(&linfo->lru_list, struct scoutfs_lock, lru_head);
        if (lock && (force || (linfo->nr_locks > opts.lock_idle_count))) {
                __lock_del_lru(linfo, lock);
                lock->request_pending = 1;

                nl.key = lock->start;
                nl.old_mode = lock->mode;
                nl.new_mode = SCOUTFS_LOCK_NULL;
        } else {
                lock = NULL;
        }

        spin_unlock(&linfo->lock);

        if (lock) {
                ret = scoutfs_client_lock_request(sb, &nl);
                if (ret < 0) {
                        scoutfs_inc_counter(sb, lock_shrink_request_failed);

                        spin_lock(&linfo->lock);

                        lock->request_pending = 0;
                        wake_up(&lock->waitq);
                        __put_lock(linfo, lock, false);

                        spin_unlock(&linfo->lock);
                } else {
                        scoutfs_inc_counter(sb, lock_shrink_attempted);
                        trace_scoutfs_lock_shrink(sb, lock);
                }
        }

        return lock && ret == 0;
}
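try_shrink_lock() peeks at lru_list and nr_locks with no spinlock held, accepting a momentarily stale answer because a wrong guess only costs one skipped or wasted pass, and re-checks under the lock before committing. A stripped-down sketch of that check-then-lock-then-recheck shape (all names here are illustrative):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct example_info {
                spinlock_t lock;
                struct list_head lru;
                unsigned int nr;
        };

        struct example_obj {
                struct list_head head;
        };

        static bool maybe_reclaim(struct example_info *inf, unsigned int limit)
        {
                struct example_obj *obj;

                /* unsynchronized fast path: stale reads only cost a missed pass */
                if (list_empty(&inf->lru) || READ_ONCE(inf->nr) <= limit)
                        return false;

                spin_lock(&inf->lock);
                /* re-check under the lock before committing to the work */
                obj = list_first_entry_or_null(&inf->lru, struct example_obj, head);
                if (obj && inf->nr > limit)
                        list_del_init(&obj->head);
                else
                        obj = NULL;
                spin_unlock(&inf->lock);

                return obj != NULL;
        }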
static bool lock_wait_cond(struct super_block *sb, struct scoutfs_lock *lock,
                           enum scoutfs_lock_mode mode)
{
@@ -937,6 +1004,8 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
        if (WARN_ON_ONCE(scoutfs_trans_held()))
                return -EDEADLK;

        try_shrink_lock(sb, linfo, false);

        spin_lock(&linfo->lock);

        /* drops and re-acquires lock if it allocates */
@@ -1380,134 +1449,12 @@ bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
                            &lock->start, &lock->end) == 0;
}

/*
 * The shrink callback got the lock, marked it request_pending, and put
 * it on the shrink list. We send a null request and the lock will be
 * freed by the response once all users drain. If this races with
 * invalidation then the server will only send the grant response once
 * the invalidation is finished.
 */
static void lock_shrink_worker(struct work_struct *work)
{
        struct lock_info *linfo = container_of(work, struct lock_info,
                                               shrink_work);
        struct super_block *sb = linfo->sb;
        struct scoutfs_net_lock nl;
        struct scoutfs_lock *lock;
        struct scoutfs_lock *tmp;
        LIST_HEAD(list);
        int ret;

        scoutfs_inc_counter(sb, lock_shrink_work);

        spin_lock(&linfo->lock);
        list_splice_init(&linfo->shrink_list, &list);
        spin_unlock(&linfo->lock);

        list_for_each_entry_safe(lock, tmp, &list, shrink_head) {
                list_del_init(&lock->shrink_head);

                /* unlocked lock access, but should be stable since we queued */
                nl.key = lock->start;
                nl.old_mode = lock->mode;
                nl.new_mode = SCOUTFS_LOCK_NULL;

                ret = scoutfs_client_lock_request(sb, &nl);
                if (ret) {
                        /* oh well, not freeing */
                        scoutfs_inc_counter(sb, lock_shrink_aborted);

                        spin_lock(&linfo->lock);

                        lock->request_pending = 0;
                        wake_up(&lock->waitq);
                        put_lock(linfo, lock);

                        spin_unlock(&linfo->lock);
                }
        }
}

static unsigned long lock_count_objects(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
        struct super_block *sb = linfo->sb;

        scoutfs_inc_counter(sb, lock_count_objects);

        return shrinker_min_long(linfo->lru_nr);
}

/*
 * Start the shrinking process for locks on the lru. If a lock is on
 * the lru then it can't have any active users. We don't want to block
 * or allocate here so all we do is get the lock, mark it request
 * pending, and kick off the work. The work sends a null request and
 * eventually the lock is freed by its response.
 *
 * Only a racing lock attempt that isn't matched can prevent the lock
 * from being freed. It'll block waiting to send its request for its
 * mode which will prevent the lock from being freed when the null
 * response arrives.
 */
static unsigned long lock_scan_objects(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
        struct super_block *sb = linfo->sb;
        struct scoutfs_lock *lock;
        struct scoutfs_lock *tmp;
        unsigned long freed = 0;
        unsigned long nr = sc->nr_to_scan;
        bool added = false;

        scoutfs_inc_counter(sb, lock_scan_objects);

        spin_lock(&linfo->lock);

restart:
        list_for_each_entry_safe(lock, tmp, &linfo->lru_list, lru_head) {

                BUG_ON(!lock_idle(lock));
                BUG_ON(lock->mode == SCOUTFS_LOCK_NULL);
                BUG_ON(!list_empty(&lock->shrink_head));

                if (nr-- == 0)
                        break;

                __lock_del_lru(linfo, lock);
                lock->request_pending = 1;
                list_add_tail(&lock->shrink_head, &linfo->shrink_list);
                added = true;
                freed++;

                scoutfs_inc_counter(sb, lock_shrink_attempted);
                trace_scoutfs_lock_shrink(sb, lock);

                /* could have bazillions of idle locks */
                if (cond_resched_lock(&linfo->lock))
                        goto restart;
        }

        spin_unlock(&linfo->lock);

        if (added)
                queue_work(linfo->workq, &linfo->shrink_work);

        trace_scoutfs_lock_shrink_exit(sb, sc->nr_to_scan, freed);
        return freed;
}

void scoutfs_free_unused_locks(struct super_block *sb)
{
        struct lock_info *linfo = SCOUTFS_SB(sb)->lock_info;
        struct shrink_control sc = {
                .gfp_mask = GFP_NOFS,
                .nr_to_scan = INT_MAX,
        };
        DECLARE_LOCK_INFO(sb, linfo);

        lock_scan_objects(KC_SHRINKER_FN(&linfo->shrinker), &sc);
        while (try_shrink_lock(sb, linfo, true))
                cond_resched();
}

static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
@@ -1590,10 +1537,10 @@ u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino)
 * transitions and sending requests. We set the shutdown flag to catch
 * anyone who breaks this rule.
 *
 * We unregister the shrinker so that we won't try and send null
 * requests in response to memory pressure. The locks will all be
 * unceremoniously dropped once we get a farewell response from the
 * server which indicates that they destroyed our locking state.
 * With no more lock callers, we'll no longer try to shrink the pool of
 * granted locks. We'll free all of them as _destroy() is called after
 * the farewell response indicates that the server tore down all our
 * lock state.
 *
 * We will still respond to invalidation requests that have to be
 * processed to let unmount in other mounts acquire locks and make
@@ -1613,10 +1560,6 @@ void scoutfs_lock_shutdown(struct super_block *sb)

        trace_scoutfs_lock_shutdown(sb, linfo);

        /* stop the shrinker from queueing work */
        KC_UNREGISTER_SHRINKER(&linfo->shrinker);
        flush_work(&linfo->shrink_work);

        /* cause current and future lock calls to return errors */
        spin_lock(&linfo->lock);
        linfo->shutdown = true;
@@ -1707,8 +1650,6 @@ void scoutfs_lock_destroy(struct super_block *sb)
                        list_del_init(&lock->inv_head);
                        lock->invalidate_pending = 0;
                }
                if (!list_empty(&lock->shrink_head))
                        list_del_init(&lock->shrink_head);
                lock_remove(linfo, lock);
                lock_free(linfo, lock);
        }
@@ -1733,14 +1674,9 @@ int scoutfs_lock_setup(struct super_block *sb)
        spin_lock_init(&linfo->lock);
        linfo->lock_tree = RB_ROOT;
        linfo->lock_range_tree = RB_ROOT;
        KC_INIT_SHRINKER_FUNCS(&linfo->shrinker, lock_count_objects,
                               lock_scan_objects);
        KC_REGISTER_SHRINKER(&linfo->shrinker, "scoutfs-lock:" SCSBF, SCSB_ARGS(sb));
        INIT_LIST_HEAD(&linfo->lru_list);
        INIT_WORK(&linfo->inv_work, lock_invalidate_worker);
        INIT_LIST_HEAD(&linfo->inv_list);
        INIT_WORK(&linfo->shrink_work, lock_shrink_worker);
        INIT_LIST_HEAD(&linfo->shrink_list);
        atomic64_set(&linfo->next_refresh_gen, 0);
        scoutfs_tseq_tree_init(&linfo->tseq_tree, lock_tseq_show);
@@ -506,6 +506,19 @@ out:
 * because we don't know which locks they'll hold. Once recover
 * finishes the server calls us to kick all the locks that were waiting
 * during recovery.
 *
 * The calling server shuts down if we return errors indicating that we
 * weren't able to ensure forward progress in the lock state machine.
 *
 * Failure to send to a disconnected client is not a fatal error.
 * During normal disconnection the client's state is removed before
 * their connection is destroyed. We can't use state to try and send to
 * a non-existing connection. But a client that fails to reconnect is
 * disconnected before being fenced. If we have multiple disconnected
 * clients we can try to send to one while cleaning up another. If
 * they've uncleanly disconnected their locks are going to be removed
 * and the lock can make forward progress again. Or we'll shutdown for
 * failure to fence.
 */
static int process_waiting_requests(struct super_block *sb,
                                    struct server_lock_node *snode)
@@ -597,6 +610,10 @@ static int process_waiting_requests(struct super_block *sb,
out:
        put_server_lock(inf, snode);

        /* disconnected clients will be fenced, trying to send to them isn't fatal */
        if (ret == -ENOTCONN)
                ret = 0;

        return ret;
}

@@ -35,6 +35,12 @@ do { \
        } \
} while (0) \

#define scoutfs_bug_on_err(sb, err, fmt, args...) \
do { \
        __typeof__(err) _err = (err); \
        scoutfs_bug_on(sb, _err < 0 && _err != -ENOLINK, fmt, ##args); \
} while (0)
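The helper evaluates err once into a local so the expression has no double side effects, and only trips for negative errors other than -ENOLINK, matching the relaxed BUG_ON sites in the lock invalidation paths above. A hypothetical call site:

        /* hypothetical use; the lock.c diff above still shows the open-coded form */
        ret = scoutfs_client_lock_response(sb, ireq->net_id, nl);
        if (ret == -ENOTCONN)
                ret = 0;
        scoutfs_bug_on_err(sb, ret, "lock response send failed");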
/*
 * Each message is only generated once per volume. Remounting resets
 * the messages.
kmod/src/net.c (191 changed lines)
@@ -21,6 +21,7 @@
#include <net/tcp.h>
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/rbtree.h>

#include "format.h"
#include "counters.h"
@@ -125,6 +126,7 @@ struct message_send {
        unsigned long dead:1;
        struct list_head head;
        scoutfs_net_response_t resp_func;
        struct rb_node node;
        void *resp_data;
        struct scoutfs_net_header nh;
};
@@ -161,49 +163,118 @@ static bool nh_is_request(struct scoutfs_net_header *nh)
        return !nh_is_response(nh);
}

static int cmp_sorted_msend(u64 pos, struct message_send *msend)
{
        if (nh_is_request(&msend->nh))
                return pos < le64_to_cpu(msend->nh.id) ? -1 :
                       pos > le64_to_cpu(msend->nh.id) ? 1 : 0;
        else
                return pos < le64_to_cpu(msend->nh.seq) ? -1 :
                       pos > le64_to_cpu(msend->nh.seq) ? 1 : 0;
}

static struct message_send *search_sorted_msends(struct rb_root *root, u64 pos, struct rb_node *ins)
{
        struct rb_node **node = &root->rb_node;
        struct rb_node *parent = NULL;
        struct message_send *msend = NULL;
        struct message_send *next = NULL;
        int cmp = -1;

        while (*node) {
                parent = *node;
                msend = container_of(*node, struct message_send, node);

                cmp = cmp_sorted_msend(pos, msend);
                if (cmp < 0) {
                        next = msend;
                        node = &(*node)->rb_left;
                } else if (cmp > 0) {
                        node = &(*node)->rb_right;
                } else {
                        next = msend;
                        break;
                }
        }

        BUG_ON(cmp == 0 && ins);

        if (ins) {
                rb_link_node(ins, parent, node);
                rb_insert_color(ins, root);
        }

        return next;
}

static struct message_send *next_sorted_msend(struct message_send *msend)
{
        struct rb_node *node = rb_next(&msend->node);

        return node ? rb_entry(node, struct message_send, node) : NULL;
}

#define for_each_sorted_msend(MSEND_, TMP_, ROOT_, POS_) \
        for (MSEND_ = search_sorted_msends(ROOT_, POS_, NULL); \
             MSEND_ != NULL && ({ TMP_ = next_sorted_msend(MSEND_); true; }); \
             MSEND_ = TMP_)
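The iteration macro saves the next node before the loop body runs, so the body is free to erase or move the current message, as move_sorted_msends() below does. The ({ ... true; }) is a GCC statement expression evaluated as part of the loop condition. The same save-next-before-body idiom in a self-contained userspace form (hypothetical names, GCC extension required):

        #include <stdio.h>

        /* save the next element before the body can disturb the current one */
        #define for_each_str(P_, TMP_, ARR_) \
                for (P_ = (ARR_)[0] ? &(ARR_)[0] : NULL; \
                     P_ != NULL && ({ TMP_ = *(P_ + 1) ? P_ + 1 : NULL; 1; }); \
                     P_ = TMP_)

        int main(void)
        {
                const char *strs[] = { "a", "b", "c", NULL };
                const char **p, **tmp;

                for_each_str(p, tmp, strs)
                        printf("%s\n", *p);     /* the body could remove *p safely */
                return 0;
        }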
static void insert_sorted_msend(struct scoutfs_net_connection *conn, struct message_send *msend)
{
        BUG_ON(!RB_EMPTY_NODE(&msend->node));

        if (nh_is_request(&msend->nh))
                search_sorted_msends(&conn->req_root, le64_to_cpu(msend->nh.id), &msend->node);
        else
                search_sorted_msends(&conn->resp_root, le64_to_cpu(msend->nh.seq), &msend->node);
}

static void erase_sorted_msend(struct scoutfs_net_connection *conn, struct message_send *msend)
{
        if (!RB_EMPTY_NODE(&msend->node)) {
                if (nh_is_request(&msend->nh))
                        rb_erase(&msend->node, &conn->req_root);
                else
                        rb_erase(&msend->node, &conn->resp_root);
                RB_CLEAR_NODE(&msend->node);
        }
}

static void move_sorted_msends(struct scoutfs_net_connection *dst_conn, struct rb_root *dst_root,
                               struct scoutfs_net_connection *src_conn, struct rb_root *src_root)
{
        struct message_send *msend;
        struct message_send *tmp;

        for_each_sorted_msend(msend, tmp, src_root, 0) {
                erase_sorted_msend(src_conn, msend);
                insert_sorted_msend(dst_conn, msend);
        }
}

/*
 * We return dead requests so that the caller can stop searching other
 * lists for the dead request that we found.
 * Pending requests are uniquely identified by the id they were assigned
 * as they were first put on the send queue.
 */
static struct message_send *search_list(struct scoutfs_net_connection *conn,
                                        struct list_head *list,
                                        u8 cmd, u64 id)
static struct message_send *find_request(struct scoutfs_net_connection *conn, u8 cmd, u64 id)
{
        struct message_send *msend;

        assert_spin_locked(&conn->lock);

        list_for_each_entry(msend, list, head) {
                if (nh_is_request(&msend->nh) && msend->nh.cmd == cmd &&
                    le64_to_cpu(msend->nh.id) == id)
                        return msend;
        }

        return NULL;
}

/*
 * Find an active send request on the lists. It's almost certainly
 * waiting on the resend queue but it could be actively being sent.
 */
static struct message_send *find_request(struct scoutfs_net_connection *conn,
                                         u8 cmd, u64 id)
{
        struct message_send *msend;

        msend = search_list(conn, &conn->resend_queue, cmd, id) ?:
                search_list(conn, &conn->send_queue, cmd, id);
        if (msend && msend->dead)
        msend = search_sorted_msends(&conn->req_root, id, NULL);
        if (msend && !(msend->nh.cmd == cmd && le64_to_cpu(msend->nh.id) == id))
                msend = NULL;

        return msend;
}

/*
 * Complete a send message by moving it to the send queue and marking it
 * to be freed. It won't be visible to callers trying to find sends.
 * Free a send message by moving it to the send queue and marking it
 * dead. It is removed from the sorted rb roots so it won't be visible
 * as a request for response processing.
 */
static void complete_send(struct scoutfs_net_connection *conn,
                          struct message_send *msend)
static void queue_dead_free(struct scoutfs_net_connection *conn, struct message_send *msend)
{
        assert_spin_locked(&conn->lock);

@@ -213,6 +284,7 @@ static void complete_send(struct scoutfs_net_connection *conn,

        msend->dead = 1;
        list_move(&msend->head, &conn->send_queue);
        erase_sorted_msend(conn, msend);
        queue_work(conn->workq, &conn->send_work);
}

@@ -370,6 +442,7 @@ static int submit_send(struct super_block *sb,
        msend->resp_func = resp_func;
        msend->resp_data = resp_data;
        msend->dead = 0;
        RB_CLEAR_NODE(&msend->node);

        msend->nh.seq = cpu_to_le64(seq);
        msend->nh.recv_seq = 0; /* set when sent, not when queued */
@@ -390,6 +463,7 @@ static int submit_send(struct super_block *sb,
        } else {
                list_add_tail(&msend->head, &conn->resend_queue);
        }
        insert_sorted_msend(conn, msend);

        if (id_ret)
                *id_ret = le64_to_cpu(msend->nh.id);
@@ -459,7 +533,7 @@ static int process_response(struct scoutfs_net_connection *conn,
        if (msend) {
                resp_func = msend->resp_func;
                resp_data = msend->resp_data;
                complete_send(conn, msend);
                queue_dead_free(conn, msend);
        } else {
                scoutfs_inc_counter(sb, net_dropped_response);
        }
@@ -550,43 +624,21 @@ static void queue_ordered_proc(struct scoutfs_net_connection *conn, struct messa
 * Free live responses up to and including the seq by marking them dead
 * and moving them to the send queue to be freed.
 */
static bool move_acked_responses(struct scoutfs_net_connection *conn,
                                 struct list_head *list, u64 seq)
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
{
        struct message_send *msend;
        struct message_send *tmp;
        bool moved = false;

        assert_spin_locked(&conn->lock);

        list_for_each_entry_safe(msend, tmp, list, head) {
                if (le64_to_cpu(msend->nh.seq) > seq)
                        break;
                if (!nh_is_response(&msend->nh) || msend->dead)
                        continue;

                msend->dead = 1;
                list_move(&msend->head, &conn->send_queue);
                moved = true;
        }

        return moved;
}

/* acks are processed inline in the recv worker */
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
{
        bool moved;

        spin_lock(&conn->lock);

        moved = move_acked_responses(conn, &conn->send_queue, seq) |
                move_acked_responses(conn, &conn->resend_queue, seq);
        for_each_sorted_msend(msend, tmp, &conn->resp_root, 0) {
                if (le64_to_cpu(msend->nh.seq) > seq)
                        break;

                queue_dead_free(conn, msend);
        }

        spin_unlock(&conn->lock);

        if (moved)
                queue_work(conn->workq, &conn->send_work);
}

static int k_recvmsg(struct socket *sock, void *buf, unsigned len)
@@ -824,9 +876,11 @@ static int k_sendmsg_full(struct socket *sock, struct kvec *kv, unsigned long nr

        return ret;
}

static void free_msend(struct net_info *ninf, struct message_send *msend)
static void free_msend(struct net_info *ninf, struct scoutfs_net_connection *conn,
                       struct message_send *msend)
{
        list_del_init(&msend->head);
        erase_sorted_msend(conn, msend);
        scoutfs_tseq_del(&ninf->msg_tseq_tree, &msend->tseq_entry);
        kfree(msend);
}
@@ -866,9 +920,10 @@ static void scoutfs_net_send_worker(struct work_struct *work)
        count = 0;

        spin_lock(&conn->lock);

        list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
                if (msend->dead) {
                        free_msend(ninf, msend);
                        free_msend(ninf, conn, msend);
                        continue;
                }

@@ -957,7 +1012,7 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)

        list_splice_init(&conn->resend_queue, &conn->send_queue);
        list_for_each_entry_safe(msend, tmp, &conn->send_queue, head)
                free_msend(ninf, msend);
                free_msend(ninf, conn, msend);

        /* accepted sockets are removed from their listener's list */
        if (conn->listening_conn) {
@@ -1303,7 +1358,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
                                          struct message_send, head))) {
                resp_func = msend->resp_func;
                resp_data = msend->resp_data;
                free_msend(ninf, msend);
                free_msend(ninf, conn, msend);
                spin_unlock(&conn->lock);

                call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);
@@ -1319,7 +1374,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
        list_splice_tail_init(&conn->send_queue, &conn->resend_queue);
        list_for_each_entry_safe(msend, tmp, &conn->resend_queue, head) {
                if (msend->nh.cmd == SCOUTFS_NET_CMD_GREETING)
                        free_msend(ninf, msend);
                        free_msend(ninf, conn, msend);
        }

        clear_conn_fl(conn, saw_greeting);
@@ -1493,6 +1548,8 @@ scoutfs_net_alloc_conn(struct super_block *sb,
        atomic64_set(&conn->recv_seq, 0);
        INIT_LIST_HEAD(&conn->send_queue);
        INIT_LIST_HEAD(&conn->resend_queue);
        conn->req_root = RB_ROOT;
        conn->resp_root = RB_ROOT;
        INIT_WORK(&conn->listen_work, scoutfs_net_listen_worker);
        INIT_WORK(&conn->connect_work, scoutfs_net_connect_worker);
        INIT_WORK(&conn->send_work, scoutfs_net_send_worker);
@@ -1705,7 +1762,7 @@ void scoutfs_net_client_greeting(struct super_block *sb,
        atomic64_set(&conn->recv_seq, 0);
        list_for_each_entry_safe(msend, tmp, &conn->resend_queue, head) {
                if (nh_is_response(&msend->nh))
                        free_msend(ninf, msend);
                        free_msend(ninf, conn, msend);
        }
}

@@ -1808,6 +1865,8 @@ restart:
        BUG_ON(!list_empty(&reconn->send_queue));
        /* queued greeting response is racing, can be in send or resend queue */
        list_splice_tail_init(&reconn->resend_queue, &conn->resend_queue);
        move_sorted_msends(conn, &conn->req_root, reconn, &reconn->req_root);
        move_sorted_msends(conn, &conn->resp_root, reconn, &reconn->resp_root);

        /* new conn info is unused, swap, old won't call down */
        swap(conn->info, reconn->info);

@@ -67,6 +67,8 @@ struct scoutfs_net_connection {
        u64 next_send_id;
        struct list_head send_queue;
        struct list_head resend_queue;
        struct rb_root req_root;
        struct rb_root resp_root;

        atomic64_t recv_seq;
        unsigned int ordered_proc_nr;
@@ -33,6 +33,8 @@ enum {
        Opt_acl,
        Opt_data_prealloc_blocks,
        Opt_data_prealloc_contig_only,
        Opt_ino_alloc_per_lock,
        Opt_lock_idle_count,
        Opt_log_merge_wait_timeout_ms,
        Opt_metadev_path,
        Opt_noacl,
@@ -47,6 +49,8 @@ static const match_table_t tokens = {
        {Opt_acl, "acl"},
        {Opt_data_prealloc_blocks, "data_prealloc_blocks=%s"},
        {Opt_data_prealloc_contig_only, "data_prealloc_contig_only=%s"},
        {Opt_ino_alloc_per_lock, "ino_alloc_per_lock=%s"},
        {Opt_lock_idle_count, "lock_idle_count=%s"},
        {Opt_log_merge_wait_timeout_ms, "log_merge_wait_timeout_ms=%s"},
        {Opt_metadev_path, "metadev_path=%s"},
        {Opt_noacl, "noacl"},
@@ -117,6 +121,10 @@ static void free_options(struct scoutfs_mount_options *opts)
        kfree(opts->metadev_path);
}

#define MIN_LOCK_IDLE_COUNT 32
#define DEFAULT_LOCK_IDLE_COUNT (10 * 1000)
#define MAX_LOCK_IDLE_COUNT (100 * 1000)

#define MIN_LOG_MERGE_WAIT_TIMEOUT_MS 100UL
#define DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS 500
#define MAX_LOG_MERGE_WAIT_TIMEOUT_MS (60 * MSEC_PER_SEC)
@@ -136,6 +144,8 @@ static void init_default_options(struct scoutfs_mount_options *opts)

        opts->data_prealloc_blocks = SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS;
        opts->data_prealloc_contig_only = 1;
        opts->ino_alloc_per_lock = SCOUTFS_LOCK_INODE_GROUP_NR;
        opts->lock_idle_count = DEFAULT_LOCK_IDLE_COUNT;
        opts->log_merge_wait_timeout_ms = DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS;
        opts->orphan_scan_delay_ms = -1;
        opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
@@ -143,6 +153,21 @@ static void init_default_options(struct scoutfs_mount_options *opts)
        opts->tcp_keepalive_timeout_ms = DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS;
}

static int verify_lock_idle_count(struct super_block *sb, int ret, int val)
{
        if (ret < 0) {
                scoutfs_err(sb, "failed to parse lock_idle_count value");
                return -EINVAL;
        }
        if (val < MIN_LOCK_IDLE_COUNT || val > MAX_LOCK_IDLE_COUNT) {
                scoutfs_err(sb, "invalid lock_idle_count value %d, must be between %u and %u",
                            val, MIN_LOCK_IDLE_COUNT, MAX_LOCK_IDLE_COUNT);
                return -EINVAL;
        }

        return 0;
}

static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)
{
        if (ret < 0) {
@@ -238,6 +263,18 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
                        opts->data_prealloc_contig_only = nr;
                        break;

                case Opt_ino_alloc_per_lock:
                        ret = match_int(args, &nr);
                        if (ret < 0 || nr < 1 || nr > SCOUTFS_LOCK_INODE_GROUP_NR) {
                                scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
                                            SCOUTFS_LOCK_INODE_GROUP_NR);
                                if (ret == 0)
                                        ret = -EINVAL;
                                return ret;
                        }
                        opts->ino_alloc_per_lock = nr;
                        break;

                case Opt_tcp_keepalive_timeout_ms:
                        ret = match_int(args, &nr);
                        ret = verify_tcp_keepalive_timeout_ms(sb, ret, nr);
@@ -246,6 +283,14 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
                        opts->tcp_keepalive_timeout_ms = nr;
                        break;

                case Opt_lock_idle_count:
                        ret = match_int(args, &nr);
                        ret = verify_lock_idle_count(sb, ret, nr);
                        if (ret < 0)
                                return ret;
                        opts->lock_idle_count = nr;
                        break;

                case Opt_log_merge_wait_timeout_ms:
                        ret = match_int(args, &nr);
                        ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
@@ -393,6 +438,7 @@ int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",acl");
        seq_printf(seq, ",data_prealloc_blocks=%llu", opts.data_prealloc_blocks);
        seq_printf(seq, ",data_prealloc_contig_only=%u", opts.data_prealloc_contig_only);
        seq_printf(seq, ",ino_alloc_per_lock=%u", opts.ino_alloc_per_lock);
        seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
        if (!is_acl)
                seq_puts(seq, ",noacl");
@@ -481,6 +527,82 @@ static ssize_t data_prealloc_contig_only_store(struct kobject *kobj, struct kobj
}
SCOUTFS_ATTR_RW(data_prealloc_contig_only);

static ssize_t ino_alloc_per_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
                                       char *buf)
{
        struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
        struct scoutfs_mount_options opts;

        scoutfs_options_read(sb, &opts);

        return snprintf(buf, PAGE_SIZE, "%u", opts.ino_alloc_per_lock);
}
static ssize_t ino_alloc_per_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
                                        const char *buf, size_t count)
{
        struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
        DECLARE_OPTIONS_INFO(sb, optinf);
        char nullterm[20]; /* more than enough for octal -U32_MAX */
        long val;
        int len;
        int ret;

        len = min(count, sizeof(nullterm) - 1);
        memcpy(nullterm, buf, len);
        nullterm[len] = '\0';

        ret = kstrtol(nullterm, 0, &val);
        if (ret < 0 || val < 1 || val > SCOUTFS_LOCK_INODE_GROUP_NR) {
                scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
                            SCOUTFS_LOCK_INODE_GROUP_NR);
                return -EINVAL;
        }

        write_seqlock(&optinf->seqlock);
        optinf->opts.ino_alloc_per_lock = val;
        write_sequnlock(&optinf->seqlock);

        return count;
}
SCOUTFS_ATTR_RW(ino_alloc_per_lock);

static ssize_t lock_idle_count_show(struct kobject *kobj, struct kobj_attribute *attr,
                                    char *buf)
{
        struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
        struct scoutfs_mount_options opts;

        scoutfs_options_read(sb, &opts);

        return snprintf(buf, PAGE_SIZE, "%u", opts.lock_idle_count);
}
static ssize_t lock_idle_count_store(struct kobject *kobj, struct kobj_attribute *attr,
                                     const char *buf, size_t count)
{
        struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
        DECLARE_OPTIONS_INFO(sb, optinf);
        char nullterm[30]; /* more than enough for octal -U64_MAX */
        int val;
        int len;
        int ret;

        len = min(count, sizeof(nullterm) - 1);
        memcpy(nullterm, buf, len);
        nullterm[len] = '\0';

        ret = kstrtoint(nullterm, 0, &val);
        ret = verify_lock_idle_count(sb, ret, val);
        if (ret == 0) {
                write_seqlock(&optinf->seqlock);
                optinf->opts.lock_idle_count = val;
                write_sequnlock(&optinf->seqlock);
                ret = count;
        }

        return ret;
}
SCOUTFS_ATTR_RW(lock_idle_count);
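Both store functions publish the new value under write_seqlock(), which implies the scoutfs_options_read() used throughout these diffs is a seqlock snapshot read. That read side isn't shown in this compare, so the following is an assumption about its shape rather than the verified implementation:

        /* assumed read side of the options seqlock; not shown in this diff */
        static void example_options_read(struct options_info *optinf,
                                         struct scoutfs_mount_options *opts)
        {
                unsigned int seq;

                do {
                        seq = read_seqbegin(&optinf->seqlock);
                        *opts = optinf->opts;   /* copy a consistent snapshot */
                } while (read_seqretry(&optinf->seqlock, seq));
        }

The seqlock lets hot readers like scoutfs_alloc_ino() take an options snapshot without ever blocking a sysfs writer.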
static ssize_t log_merge_wait_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
                                              char *buf)
{
@@ -621,6 +743,8 @@ SCOUTFS_ATTR_RO(quorum_slot_nr);
static struct attribute *options_attrs[] = {
        SCOUTFS_ATTR_PTR(data_prealloc_blocks),
        SCOUTFS_ATTR_PTR(data_prealloc_contig_only),
        SCOUTFS_ATTR_PTR(ino_alloc_per_lock),
        SCOUTFS_ATTR_PTR(lock_idle_count),
        SCOUTFS_ATTR_PTR(log_merge_wait_timeout_ms),
        SCOUTFS_ATTR_PTR(metadev_path),
        SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),

@@ -8,6 +8,8 @@
struct scoutfs_mount_options {
        u64 data_prealloc_blocks;
        bool data_prealloc_contig_only;
        unsigned int ino_alloc_per_lock;
        int lock_idle_count;
        unsigned int log_merge_wait_timeout_ms;
        char *metadev_path;
        unsigned int orphan_scan_delay_ms;
@@ -789,6 +789,80 @@ TRACE_EVENT(scoutfs_inode_walk_writeback,
                  __entry->ino, __entry->write, __entry->ret)
);

TRACE_EVENT(scoutfs_orphan_scan_start,
        TP_PROTO(struct super_block *sb),

        TP_ARGS(sb),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
        ),

        TP_printk(SCSBF, SCSB_TRACE_ARGS)
);

TRACE_EVENT(scoutfs_orphan_scan_stop,
        TP_PROTO(struct super_block *sb, bool work_todo),

        TP_ARGS(sb, work_todo),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(bool, work_todo)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->work_todo = work_todo;
        ),

        TP_printk(SCSBF" work_todo %d", SCSB_TRACE_ARGS, __entry->work_todo)
);

TRACE_EVENT(scoutfs_orphan_scan_work,
        TP_PROTO(struct super_block *sb, __u64 ino),

        TP_ARGS(sb, ino),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, ino)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ino = ino;
        ),

        TP_printk(SCSBF" ino %llu", SCSB_TRACE_ARGS,
                  __entry->ino)
);

TRACE_EVENT(scoutfs_orphan_scan_end,
        TP_PROTO(struct super_block *sb, __u64 ino, int ret),

        TP_ARGS(sb, ino, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, ino)
                __field(int, ret)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ino = ino;
                __entry->ret = ret;
        ),

        TP_printk(SCSBF" ino %llu ret %d", SCSB_TRACE_ARGS,
                  __entry->ino, __entry->ret)
);

DECLARE_EVENT_CLASS(scoutfs_lock_info_class,
        TP_PROTO(struct super_block *sb, struct lock_info *linfo),

@@ -1036,6 +1110,82 @@ TRACE_EVENT(scoutfs_orphan_inode,
                  MINOR(__entry->dev), __entry->ino)
);

DECLARE_EVENT_CLASS(scoutfs_try_delete_class,
        TP_PROTO(struct super_block *sb, u64 ino),
        TP_ARGS(sb, ino),
        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, ino)
        ),
        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ino = ino;
        ),
        TP_printk(SCSBF" ino %llu", SCSB_TRACE_ARGS, __entry->ino)
);

DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete,
        TP_PROTO(struct super_block *sb, u64 ino),
        TP_ARGS(sb, ino)
);

DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_local_busy,
        TP_PROTO(struct super_block *sb, u64 ino),
        TP_ARGS(sb, ino)
);

DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_cached,
        TP_PROTO(struct super_block *sb, u64 ino),
        TP_ARGS(sb, ino)
);

DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_no_item,
        TP_PROTO(struct super_block *sb, u64 ino),
        TP_ARGS(sb, ino)
);

TRACE_EVENT(scoutfs_try_delete_has_links,
        TP_PROTO(struct super_block *sb, u64 ino, unsigned int nlink),

        TP_ARGS(sb, ino, nlink),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, ino)
                __field(unsigned int, nlink)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ino = ino;
                __entry->nlink = nlink;
        ),

        TP_printk(SCSBF" ino %llu nlink %u", SCSB_TRACE_ARGS, __entry->ino,
                  __entry->nlink)
);

TRACE_EVENT(scoutfs_inode_orphan_delete,
        TP_PROTO(struct super_block *sb, u64 ino, int ret),

        TP_ARGS(sb, ino, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(__u64, ino)
                __field(int, ret)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ino = ino;
                __entry->ret = ret;
        ),

        TP_printk(SCSBF" ino %llu ret %d", SCSB_TRACE_ARGS, __entry->ino,
                  __entry->ret)
);

TRACE_EVENT(scoutfs_delete_inode,
        TP_PROTO(struct super_block *sb, u64 ino, umode_t mode, u64 size),

@@ -1060,6 +1210,32 @@ TRACE_EVENT(scoutfs_delete_inode,
                  __entry->mode, __entry->size)
);

TRACE_EVENT(scoutfs_delete_inode_end,
        TP_PROTO(struct super_block *sb, u64 ino, umode_t mode, u64 size, int ret),

        TP_ARGS(sb, ino, mode, size, ret),

        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(__u64, ino)
                __field(umode_t, mode)
                __field(__u64, size)
                __field(int, ret)
        ),

        TP_fast_assign(
                __entry->dev = sb->s_dev;
                __entry->ino = ino;
                __entry->mode = mode;
                __entry->size = size;
                __entry->ret = ret;
        ),

        TP_printk("dev %d,%d ino %llu, mode 0x%x size %llu, ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
                  __entry->mode, __entry->size, __entry->ret)
);

DECLARE_EVENT_CLASS(scoutfs_key_class,
        TP_PROTO(struct super_block *sb, struct scoutfs_key *key),
        TP_ARGS(sb, key),
@@ -1443,28 +1619,6 @@ DEFINE_EVENT(scoutfs_work_class, scoutfs_data_return_server_extents_exit,
        TP_ARGS(sb, data, ret)
);

DECLARE_EVENT_CLASS(scoutfs_shrink_exit_class,
        TP_PROTO(struct super_block *sb, unsigned long nr_to_scan, int ret),
        TP_ARGS(sb, nr_to_scan, ret),
        TP_STRUCT__entry(
                __field(void *, sb)
                __field(unsigned long, nr_to_scan)
                __field(int, ret)
        ),
        TP_fast_assign(
                __entry->sb = sb;
                __entry->nr_to_scan = nr_to_scan;
                __entry->ret = ret;
        ),
        TP_printk("sb %p nr_to_scan %lu ret %d",
                  __entry->sb, __entry->nr_to_scan, __entry->ret)
);

DEFINE_EVENT(scoutfs_shrink_exit_class, scoutfs_lock_shrink_exit,
        TP_PROTO(struct super_block *sb, unsigned long nr_to_scan, int ret),
        TP_ARGS(sb, nr_to_scan, ret)
);

TRACE_EVENT(scoutfs_rename,
        TP_PROTO(struct super_block *sb, struct inode *old_dir,
                 struct dentry *old_dentry, struct inode *new_dir,
@@ -3097,6 +3251,24 @@ TRACE_EVENT(scoutfs_ioc_search_xattrs,
                  __entry->ino, __entry->last_ino)
);

TRACE_EVENT(scoutfs_trigger_fired,
        TP_PROTO(struct super_block *sb, const char *name),

        TP_ARGS(sb, name),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(const char *, name)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->name = name;
        ),

        TP_printk(SCSBF" %s", SCSB_TRACE_ARGS, __entry->name)
);

#endif /* _TRACE_SCOUTFS_H */

/* This part must be outside protection */
@@ -41,6 +41,7 @@
#include "recov.h"
#include "omap.h"
#include "fence.h"
#include "triggers.h"

/*
 * Every active mount can act as the server that listens on a net
@@ -994,10 +995,11 @@ static int for_each_rid_last_lt(struct super_block *sb, struct scoutfs_btree_roo
}

/*
 * Log merge range items are stored at the starting fs key of the range.
 * The only fs key field that doesn't hold information is the zone, so
 * we use the zone to differentiate all types that we store in the log
 * merge tree.
 * Log merge range items are stored at the starting fs key of the range
 * with the zone overwritten to indicate the log merge item type. This
 * day0 mistake loses sorting information for items in the different
 * zones in the fs root, so the range items aren't strictly sorted by
 * the starting key of their range.
 */
static void init_log_merge_key(struct scoutfs_key *key, u8 zone, u64 first,
                               u64 second)
@@ -1029,6 +1031,51 @@ static int next_log_merge_item_key(struct super_block *sb, struct scoutfs_btree_
        return ret;
}

/*
 * The range items aren't sorted by their range.start because
 * _RANGE_ZONE clobbers the range's zone. We sweep all the items and
 * find the range with the next least starting key that's greater than
 * the caller's starting key. We have to be careful to iterate over the
 * log_merge tree keys because the ranges can overlap as they're mapped
 * to the log_merge keys by clobbering their zone.
 */
static int next_log_merge_range(struct super_block *sb, struct scoutfs_btree_root *root,
                                struct scoutfs_key *start, struct scoutfs_log_merge_range *rng)
{
        struct scoutfs_log_merge_range *next;
        SCOUTFS_BTREE_ITEM_REF(iref);
        struct scoutfs_key key;
        int ret;

        key = *start;
        key.sk_zone = SCOUTFS_LOG_MERGE_RANGE_ZONE;
        scoutfs_key_set_ones(&rng->start);

        do {
                ret = scoutfs_btree_next(sb, root, &key, &iref);
                if (ret == 0) {
                        if (iref.key->sk_zone != SCOUTFS_LOG_MERGE_RANGE_ZONE) {
                                ret = -ENOENT;
                        } else if (iref.val_len != sizeof(struct scoutfs_log_merge_range)) {
                                ret = -EIO;
                        } else {
                                next = iref.val;
                                if (scoutfs_key_compare(&next->start, &rng->start) < 0 &&
                                    scoutfs_key_compare(&next->start, start) >= 0)
                                        *rng = *next;
                                key = *iref.key;
                                scoutfs_key_inc(&key);
                        }
                        scoutfs_btree_put_iref(&iref);
                }
        } while (ret == 0);

        if (ret == -ENOENT && !scoutfs_key_is_ones(&rng->start))
                ret = 0;

        return ret;
}
static int next_log_merge_item(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
u8 zone, u64 first, u64 second,
|
||||
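As a reading aid, not part of the change: given the contract of
next_log_merge_range() above, a caller sweeping every range would look
roughly like the sketch below. scoutfs_key_set_zeros() is assumed here
by analogy with the scoutfs_key_set_ones() used in the hunk.

	struct scoutfs_log_merge_range rng;
	struct scoutfs_key start;
	int ret;

	scoutfs_key_set_zeros(&start);	/* assumed helper: start from the least key */
	while ((ret = next_log_merge_range(sb, &super->log_merge, &start, &rng)) == 0) {
		/* process rng, then resume after its starting key */
		start = rng.start;
		scoutfs_key_inc(&start);
	}
	/* ret ends as -ENOENT once no range starts at or after start */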
@@ -1245,9 +1292,13 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
 	 * meta was low so that deleted items are merged
 	 * promptly and freed blocks can bring the client out of
 	 * enospc.
+	 *
+	 * The trigger can be used to force a log merge in cases where
+	 * a test only generates small amounts of change.
 	 */
 	finalize_ours = (lt->item_root.height > 2) ||
-			(le32_to_cpu(lt->meta_avail.flags) & SCOUTFS_ALLOC_FLAG_LOW);
+			(le32_to_cpu(lt->meta_avail.flags) & SCOUTFS_ALLOC_FLAG_LOW) ||
+			scoutfs_trigger(sb, LOG_MERGE_FORCE_FINALIZE_OURS);
 
 	trace_scoutfs_server_finalize_decision(sb, rid, saw_finalized, others_active,
 					       ours_visible, finalize_ours, delay_ms,
@@ -1356,6 +1407,8 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
 			BUG_ON(err); /* inconsistent */
 		}
 
+		scoutfs_inc_counter(sb, log_merge_start);
+
 		/* we're done, caller can make forward progress */
 		break;
 	}
@@ -1572,7 +1625,8 @@ static int server_get_log_trees(struct super_block *sb,
 		goto update;
 	}
 
-	ret = alloc_move_empty(sb, &super->data_alloc, &lt.data_freed, 100);
+	ret = alloc_move_empty(sb, &super->data_alloc, &lt.data_freed,
+			       COMMIT_HOLD_ALLOC_BUDGET / 2);
 	if (ret == -EINPROGRESS)
 		ret = 0;
 	if (ret < 0) {
@@ -1682,6 +1736,7 @@ static int server_commit_log_trees(struct super_block *sb,
 	int ret;
 
 	if (arg_len != sizeof(struct scoutfs_log_trees)) {
+		err_str = "invalid message log_trees size";
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1745,7 +1800,7 @@ static int server_commit_log_trees(struct super_block *sb,
 
 	ret = scoutfs_btree_update(sb, &server->alloc, &server->wri,
 				   &super->logs_root, &key, &lt, sizeof(lt));
-	BUG_ON(ret < 0); /* dirtying should have guaranteed success */
+	BUG_ON(ret < 0); /* dirtying should have guaranteed success, srch item inconsistent */
 	if (ret < 0)
 		err_str = "updating log trees item";
 
@@ -1753,11 +1808,10 @@ unlock:
 	mutex_unlock(&server->logs_mutex);
 
 	ret = server_apply_commit(sb, &hold, ret);
-out:
 	if (ret < 0)
 		scoutfs_err(sb, "server error %d committing client logs for rid %016llx, nr %llu: %s",
 			    ret, rid, le64_to_cpu(lt.nr), err_str);
+out:
 	WARN_ON_ONCE(ret < 0);
 	return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
 }
@@ -1867,9 +1921,11 @@ static int reclaim_open_log_tree(struct super_block *sb, u64 rid)
 		scoutfs_alloc_splice_list(sb, &server->alloc, &server->wri, server->other_freed,
 					  &lt.meta_avail)) ?:
 	      (err_str = "empty data_avail",
-	       alloc_move_empty(sb, &super->data_alloc, &lt.data_avail, 100)) ?:
+	       alloc_move_empty(sb, &super->data_alloc, &lt.data_avail,
+				COMMIT_HOLD_ALLOC_BUDGET / 2)) ?:
 	      (err_str = "empty data_freed",
-	       alloc_move_empty(sb, &super->data_alloc, &lt.data_freed, 100));
+	       alloc_move_empty(sb, &super->data_alloc, &lt.data_freed,
+				COMMIT_HOLD_ALLOC_BUDGET / 2));
 	mutex_unlock(&server->alloc_mutex);
 
 	/* only finalize, allowing merging, once the allocators are fully freed */
@@ -2094,7 +2150,7 @@ static int server_srch_get_compact(struct super_block *sb,
 
 apply:
 	ret = server_apply_commit(sb, &hold, ret);
-	WARN_ON_ONCE(ret < 0 && ret != -ENOENT); /* XXX leaked busy item */
+	WARN_ON_ONCE(ret < 0 && ret != -ENOENT && ret != -ENOLINK); /* XXX leaked busy item */
 out:
 	ret = scoutfs_net_response(sb, conn, cmd, id, ret,
 				   sc, sizeof(struct scoutfs_srch_compact));
@@ -2460,6 +2516,8 @@ static int splice_log_merge_completions(struct super_block *sb,
 		queue_work(server->wq, &server->log_merge_free_work);
 	else
 		err_str = "deleting merge status item";
+
+	scoutfs_inc_counter(sb, log_merge_complete);
 out:
 	if (upd_stat) {
 		init_log_merge_key(&key, SCOUTFS_LOG_MERGE_STATUS_ZONE, 0, 0);
@@ -2472,10 +2530,9 @@ out:
 		}
 	}
 
-	if (ret < 0)
-		scoutfs_err(sb, "server error %d splicing log merge completion: %s", ret, err_str);
-
-	BUG_ON(ret); /* inconsistent */
+	/* inconsistent */
+	scoutfs_bug_on_err(sb, ret,
+			   "server error %d splicing log merge completion: %s", ret, err_str);
 
 	return ret ?: einprogress;
 }
@@ -2720,10 +2777,7 @@ restart:
 
 	/* find the next range, always checking for splicing */
 	for (;;) {
-		key = stat.next_range_key;
-		key.sk_zone = SCOUTFS_LOG_MERGE_RANGE_ZONE;
-		ret = next_log_merge_item_key(sb, &super->log_merge, SCOUTFS_LOG_MERGE_RANGE_ZONE,
-					      &key, &rng, sizeof(rng));
+		ret = next_log_merge_range(sb, &super->log_merge, &stat.next_range_key, &rng);
 		if (ret < 0 && ret != -ENOENT) {
 			err_str = "finding merge range item";
 			goto out;
@@ -2994,7 +3048,13 @@ static int server_commit_log_merge(struct super_block *sb,
 				  SCOUTFS_LOG_MERGE_STATUS_ZONE, 0, 0,
 				  &stat, sizeof(stat));
 	if (ret < 0) {
-		err_str = "getting merge status item";
+		/*
+		 * During a retransmission, it's possible that the server
+		 * already committed and resolved this log merge.  ENOENT
+		 * is expected in that case.
+		 */
+		if (ret != -ENOENT)
+			err_str = "getting merge status item";
 		goto out;
 	}
@@ -18,6 +18,7 @@
 
 #include "super.h"
 #include "triggers.h"
+#include "scoutfs_trace.h"
 
 /*
  * We have debugfs files we can write to which arm triggers which
@@ -39,6 +40,7 @@ struct scoutfs_triggers {
 
 static char *names[] = {
 	[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
+	[SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS] = "log_merge_force_finalize_ours",
 	[SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
 	[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
 	[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
@@ -51,6 +53,7 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
 	atomic_t *atom;
 	int old;
 	int mem;
+	bool fired;
 
 	BUG_ON(t >= SCOUTFS_TRIGGER_NR);
 	atom = &triggers->atomics[t];
@@ -64,7 +67,12 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
 		mem = atomic_cmpxchg(atom, old, 0);
 	} while (mem && mem != old);
 
-	return !!mem;
+	fired = !!mem;
+
+	if (fired)
+		trace_scoutfs_trigger_fired(sb, names[t]);
+
+	return fired;
 }
 
 int scoutfs_setup_triggers(struct super_block *sb)
@@ -3,6 +3,7 @@
 
 enum scoutfs_trigger {
 	SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
+	SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS,
 	SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
 	SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
 	SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
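Triggers are armed through per-mount debugfs files, which is what the
t_trigger_arm_silent and t_trigger_get test helpers used below wrap. A
hand-run sketch; the exact file layout under debugfs is an assumption,
and the per-mount directory is left as a placeholder since its name
varies by mount:

    # arm the trigger; it reads back 1 until the server consumes it
    echo 1 > /sys/kernel/debug/scoutfs/<mount>/trigger/log_merge_force_finalize_ours
    cat /sys/kernel/debug/scoutfs/<mount>/trigger/log_merge_force_finalize_ours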
@@ -8,36 +8,33 @@
 
 echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"
 
-log() {
-	echo "$@" > /dev/stderr
-}
-
 echo_fail() {
-	echo "$@" > /dev/stderr
+	echo "$@" >&2
 	exit 1
 }
 
+# silence error messages
+quiet_cat()
+{
+	cat "$@" 2>/dev/null
+}
+
 rid="$SCOUTFS_FENCED_REQ_RID"
 
+shopt -s nullglob
 for fs in /sys/fs/scoutfs/*; do
-	[ ! -d "$fs" ] && continue
-
-	fs_rid="$(cat $fs/rid)" || \
-		echo_fail "failed to get rid in $fs"
-	if [ "$fs_rid" != "$rid" ]; then
-		continue
-	fi
-
-	nr="$(cat $fs/data_device_maj_min)" || \
-		echo_fail "failed to get data device major:minor in $fs"
-
-	mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
-		echo_fail "findmnt -t scoutfs -S $nr failed"
-	for mnt in $mnts; do
-		umount -f "$mnt" || \
-			echo_fail "umount -f $mnt failed"
-	done
+	fs_rid="$(quiet_cat $fs/rid)"
+	nr="$(quiet_cat $fs/data_device_maj_min)"
+	[ ! -d "$fs" -o "$fs_rid" != "$rid" ] && continue
+
+	mnt=$(findmnt -l -n -t scoutfs -o TARGET -S $nr)
+	[ -z "$mnt" ] && continue
+
+	if ! umount -qf "$mnt"; then
+		if [ -d "$fs" ]; then
+			echo_fail "umount -qf $mnt failed"
+		fi
+	fi
 done
 
 exit 0
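For context, an illustration rather than part of the change: fenced
hands each request to this RUN script through the SCOUTFS_FENCED_REQ_*
environment variables seen above, so a by-hand invocation could look
like the following, with placeholder values and script name.

    SCOUTFS_FENCED_REQ_RID=deadbeef00000001 \
    SCOUTFS_FENCED_REQ_IP=10.0.0.2 \
    ./run-script ignored run args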
@@ -121,6 +121,7 @@ t_filter_dmesg()
 
 	# in debugging kernels we can slow things down a bit
 	re="$re|hrtimer: interrupt took .*"
+	re="$re|clocksource: Long readout interval"
 
 	# fencing tests force unmounts and trigger timeouts
 	re="$re|scoutfs .* forcing unmount"
@@ -166,6 +167,12 @@ t_filter_dmesg()
 	# perf warning that it adjusted sample rate
 	re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"
 
+	# some ci test guests are unresponsive
+	re="$re|longest quorum heartbeat .* delay"
+
+	# creating block devices may trigger this
+	re="$re|block device autoloading is deprecated and will be removed."
+
 	egrep -v "($re)" | \
 		ignore_harmless_unwind_kasan_stack_oob
 }
@@ -498,3 +498,121 @@ t_restore_all_sysfs_mount_options() {
 		t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
 	done
 }
+
+t_force_log_merge() {
+	local sv=$(t_server_nr)
+	local merges_started
+	local last_merges_started
+	local merges_completed
+	local last_merges_completed
+
+	while true; do
+		last_merges_started=$(t_counter log_merge_start $sv)
+		last_merges_completed=$(t_counter log_merge_complete $sv)
+
+		t_trigger_arm_silent log_merge_force_finalize_ours $sv
+
+		t_sync_seq_index
+
+		while test "$(t_trigger_get log_merge_force_finalize_ours $sv)" == "1"; do
+			sleep .5
+		done
+
+		merges_started=$(t_counter log_merge_start $sv)
+
+		if (( merges_started > last_merges_started )); then
+			merges_completed=$(t_counter log_merge_complete $sv)
+
+			while (( merges_completed == last_merges_completed )); do
+				sleep .5
+				merges_completed=$(t_counter log_merge_complete $sv)
+			done
+			break
+		fi
+	done
+}
+
+declare -A _last_scan
+t_get_orphan_scan_runs() {
+	local i
+
+	for i in $(t_fs_nrs); do
+		_last_scan[$i]=$(t_counter orphan_scan $i)
+	done
+}
+
+t_wait_for_orphan_scan_runs() {
+	local i
+	local scan
+
+	t_get_orphan_scan_runs
+
+	for i in $(t_fs_nrs); do
+		while true; do
+			scan=$(t_counter orphan_scan $i)
+			if (( scan != _last_scan[$i] )); then
+				break
+			fi
+			sleep .5
+		done
+	done
+}
+
+declare -A _last_empty
+t_get_orphan_scan_empty() {
+	local i
+
+	for i in $(t_fs_nrs); do
+		_last_empty[$i]=$(t_counter orphan_scan_empty $i)
+	done
+}
+
+t_wait_for_no_orphans() {
+	local i
+	local working
+	local empty
+
+	t_get_orphan_scan_empty
+
+	while true; do
+		working=0
+
+		t_wait_for_orphan_scan_runs
+
+		for i in $(t_fs_nrs); do
+			empty=$(t_counter orphan_scan_empty $i)
+			if (( empty == _last_empty[$i] )); then
+				(( working++ ))
+			else
+				(( _last_empty[$i] = empty ))
+			fi
+		done
+
+		if (( working == 0 )); then
+			break
+		fi
+
+		sleep 1
+	done
+}
+
+#
+# Repeatedly run the arguments as a command, sleeping in between, until
+# it returns success.  The first argument is a relative timeout in
+# seconds.  The remaining arguments are the command and its arguments.
+#
+# If the timeout expires without the command returning 0 then the test
+# fails.
+#
+t_wait_until_timeout() {
+	local relative="$1"
+	local expire="$((SECONDS + relative))"
+	shift
+
+	while (( SECONDS < expire )); do
+		"$@" && return
+		sleep 1
+	done
+
+	t_fail "command failed for $relative sec: $@"
+}
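Usage is straightforward: the first argument is the timeout in seconds
and the rest are run verbatim. The fence tests below, for example,
replace their open-coded polling loops with calls like:

    t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rid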
@@ -43,9 +43,14 @@ t_tap_progress()
 	local testname=$1
 	local result=$2
 
+	local stmsg=""
 	local diff=""
 	local dmsg=""
 
+	if [[ -s $T_RESULTS/tmp/${testname}/status.msg ]]; then
+		stmsg="1"
+	fi
+
 	if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
 		dmsg="1"
 	fi
@@ -61,6 +66,7 @@ t_tap_progress()
 		echo "# ${testname} ** skipped - permitted **"
 	else
 		echo "not ok ${i} - ${testname}"
+
 		case ${result} in
 		101)
 			echo "# ${testname} ** skipped **"
@@ -70,6 +76,13 @@ t_tap_progress()
 			;;
 		esac
 
+		if [[ -n "${stmsg}" ]]; then
+			echo "#"
+			echo "# status:"
+			echo "#"
+			cat $T_RESULTS/tmp/${testname}/status.msg | sed 's/^/# - /'
+		fi
+
 		if [[ -n "${diff}" ]]; then
 			echo "#"
 			echo "# diff:"
@@ -17,7 +17,7 @@ ino not found in dseq index
 mount 0 contents after mount 1 rm: contents
 ino found in dseq index
 ino found in dseq index
-stat: cannot stat '/mnt/test/test/inode-deletion/file': No such file or directory
+stat: cannot stat '/mnt/test/test/inode-deletion/badfile': No such file or directory
 ino not found in dseq index
 ino not found in dseq index
 == lots of deletions use one open map
@@ -39,20 +39,6 @@ cmd() {
 	die "cmd failed (check the run.log)"
 }
 
-# we can record pids to kill as we exit, we kill in reverse added order
-declare -a atexit_kill_pids
-atexit_kill()
-{
-	local pid
-
-	for pid in $(echo ${atexit_kill_pids[*]} | rev); do
-		if test -e "/proc/$pid/status" ; then
-			kill "$pid"
-		fi
-	done
-}
-trap atexit_kill EXIT
-
 show_help()
 {
 	cat << EOF
@@ -414,7 +400,8 @@ if [ -n "$T_INSMOD" ]; then
 fi
 
 if [ -n "$T_TRACE_MULT" ]; then
-	orig_trace_size=$(cat /sys/kernel/debug/tracing/buffer_size_kb)
+	# orig_trace_size=$(cat /sys/kernel/debug/tracing/buffer_size_kb)
+	orig_trace_size=1408
 	mult_trace_size=$((orig_trace_size * T_TRACE_MULT))
 	msg "increasing trace buffer size from $orig_trace_size KiB to $mult_trace_size KiB"
 	echo $mult_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
@@ -452,6 +439,30 @@ cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
 	/sys/kernel/debug/tracing/buffer_size_kb \
 	/proc/sys/kernel/ftrace_dump_on_oops
 
+# we can record pids to kill as we exit, we kill in reverse added order
+atexit_kill_pids=""
+add_atexit_kill_pid()
+{
+	atexit_kill_pids="$1 $atexit_kill_pids"
+}
+atexit_kill()
+{
+	local pid
+
+	# suppress bg function exited messages
+	exec {ERR}>&2 2>/dev/null
+
+	for pid in $atexit_kill_pids; do
+		if test -e "/proc/$pid/status" ; then
+			kill "$pid"
+			wait "$pid"
+		fi
+	done
+
+	exec 2>&$ERR {ERR}>&-
+}
+trap atexit_kill EXIT
+
 #
 # Build a fenced config that runs scripts out of the repository rather
 # than the default system directory
@@ -467,7 +478,7 @@ T_FENCED_LOG="$T_RESULTS/fenced.log"
 
 $T_UTILS/fenced/scoutfs-fenced > "$T_FENCED_LOG" 2>&1 &
 fenced_pid=$!
-atexit_kill_pids+=($fenced_pid)
+add_atexit_kill_pid $fenced_pid
 
 #
 # some critical failures will cause fs operations to hang. We can watch
@@ -494,15 +505,17 @@ crash_monitor()
 		fi
 
 		if [ "$bad" != 0 ]; then
-			echo "run-tests monitor triggering crash"
+			echo "run-tests monitor syncing and triggering crash"
+			# hail mary, the sync could well hang
+			(echo s > /proc/sysrq-trigger) &
+			sleep 5
 			echo c > /proc/sysrq-trigger
+			# bg function doesn't reload bash, $$ is parent run-tests.sh
+			kill -9 $$
 			exit 1
 		fi
 	done
 }
 crash_monitor &
-atexit_kill_pids+=($!)
+add_atexit_kill_pid $!
 
 # setup dm tables
 echo "0 $(blockdev --getsz $T_META_DEVICE) linear $T_META_DEVICE 0" > \
@@ -19,6 +19,7 @@
 #include <sys/types.h>
 #include <stdio.h>
 #include <sys/stat.h>
+#include <inttypes.h>
 #include <fcntl.h>
 #include <unistd.h>
 #include <stdlib.h>
@@ -29,7 +30,7 @@
 #include <errno.h>
 
 static int size = 0;
-static int count = 0; /* XXX make this duration instead */
+static int duration = 0;
 
 struct thread_info {
 	int nr;
@@ -41,6 +42,8 @@ static void *run_test_func(void *ptr)
 	void *buf = NULL;
 	char *addr = NULL;
 	struct thread_info *tinfo = ptr;
+	uint64_t seconds = 0;
+	struct timespec ts;
 	int c = 0;
 	int fd;
 	ssize_t read, written, ret;
@@ -61,9 +64,15 @@ static void *run_test_func(void *ptr)
 
 	usleep(100000); /* 0.1sec to allow all threads to start roughly at the same time */
 
+	clock_gettime(CLOCK_REALTIME, &ts); /* record start time */
+	seconds = ts.tv_sec + duration;
+
 	for (;;) {
-		if (++c > count)
-			break;
+		if (++c % 16 == 0) {
+			clock_gettime(CLOCK_REALTIME, &ts);
+			if (ts.tv_sec >= seconds)
+				break;
+		}
 
 		switch (rand() % 4) {
 		case 0: /* pread */
@@ -99,6 +108,8 @@ static void *run_test_func(void *ptr)
 			memcpy(addr, buf, size); /* noerr */
 			break;
 		}
+
+		usleep(10000);
 	}
 
 	munmap(addr, size);
@@ -120,7 +131,7 @@ int main(int argc, char **argv)
 	int i;
 
 	if (argc != 8) {
-		fprintf(stderr, "%s requires 7 arguments - size count file1 file2 file3 file4 file5\n", argv[0]);
+		fprintf(stderr, "%s requires 7 arguments - size duration file1 file2 file3 file4 file5\n", argv[0]);
 		exit(-1);
 	}
@@ -130,9 +141,9 @@ int main(int argc, char **argv)
 		exit(-1);
 	}
 
-	count = atoi(argv[2]);
-	if (count < 0) {
-		fprintf(stderr, "invalid count, must be greater than 0\n");
+	duration = atoi(argv[2]);
+	if (duration < 0) {
+		fprintf(stderr, "invalid duration, must be greater than or equal to 0\n");
 		exit(-1);
 	}
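With the second argument now a duration in seconds rather than an
iteration count, an invocation looks like the updated mmap test below:

    mmap_stress 8192 30 file1 file2 file3 file4 file5    # size 8192, run ~30 seconds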
@@ -5,6 +5,9 @@
 t_require_commands sleep touch grep sync scoutfs
 t_require_mounts 2
 
+# regularly see ~20/~30s
+VERIFY_TIMEOUT_SECS=90
+
 #
 # Make sure that all mounts can read the results of a write from each
 # mount.
@@ -40,8 +43,10 @@ verify_fenced_run()
 
 	for rid in $rids; do
 		grep -q ".* running rid '$rid'.* args 'ignored run args'" "$T_FENCED_LOG" || \
-			t_fail "fenced didn't execute RUN script for rid $rid"
+			return 1
 	done
+
+	return 0
 }
 
 echo "== make sure all mounts can see each other"
@@ -54,14 +59,7 @@ rid=$(t_mount_rid $cl)
 echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"
 sync
 t_force_umount $cl
-# wait for client reconnection to timeout
-while grep -q $rid $(t_debugfs_path $sv)/connections; do
-	sleep .5
-done
-while t_rid_is_fencing $rid; do
-	sleep .5
-done
-verify_fenced_run $rid
+t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rid
 t_mount $cl
 check_read_write
@@ -83,15 +81,7 @@ for cl in $(t_fs_nrs); do
 	t_force_umount $cl
 done
 
-# wait for all client reconnections to timeout
-while egrep -q "($pattern)" $(t_debugfs_path $sv)/connections; do
-	sleep .5
-done
-# wait for all fence requests to complete
-while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
-	sleep .5
-done
-verify_fenced_run $rids
+t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rids
 # remount all the clients
 for cl in $(t_fs_nrs); do
 	if [ $cl == $sv ]; then
@@ -107,12 +97,7 @@ rid=$(t_mount_rid $sv)
 echo "sv $sv rid $rid" >> "$T_TMP.log"
 sync
 t_force_umount $sv
 t_wait_for_leader
-# wait until new server is done fencing unmounted leader rid
-while t_rid_is_fencing $rid; do
-	sleep .5
-done
-verify_fenced_run $rid
+t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rid
 t_mount $sv
 check_read_write
@@ -127,11 +112,7 @@ for nr in $(t_fs_nrs); do
 	t_force_umount $nr
 done
 t_mount_all
-# wait for all fence requests to complete
-while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
-	sleep .5
-done
-verify_fenced_run $rids
+t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rids
 check_read_write
 
 t_pass
@@ -72,7 +72,7 @@ touch $T_D0/dir/file
 mkdir $T_D0/dir/dir
 ln -s $T_D0/dir/file $T_D0/dir/symlink
 mknod $T_D0/dir/char c 1 3 # null
-mknod $T_D0/dir/block b 7 0 # loop0
+mknod $T_D0/dir/block b 42 0 # SAMPLE block dev - nonexistent/demo use only number
 for name in $(ls -UA $T_D0/dir | sort); do
 	ino=$(stat -c '%i' $T_D0/dir/$name)
 	$GRE $ino | filter_types
@@ -61,18 +61,28 @@ rm -f "$T_D1/file"
 check_ino_index "$ino" "$dseq" "$T_M0"
 check_ino_index "$ino" "$dseq" "$T_M1"
 
+# Hurry along the orphan scanners.  If any are currently asleep, we will
+# have to wait at least their current scan interval before they wake up,
+# run, and notice their new interval.
+t_save_all_sysfs_mount_options orphan_scan_delay_ms
+t_set_all_sysfs_mount_options orphan_scan_delay_ms 500
+t_wait_for_orphan_scan_runs
+
 echo "== unlink wait for open on other mount"
-echo "contents" > "$T_D0/file"
-ino=$(stat -c "%i" "$T_D0/file")
-dseq=$(scoutfs stat -s data_seq "$T_D0/file")
-exec {FD}<"$T_D0/file"
-rm -f "$T_D1/file"
+echo "contents" > "$T_D0/badfile"
+ino=$(stat -c "%i" "$T_D0/badfile")
+dseq=$(scoutfs stat -s data_seq "$T_D0/badfile")
+exec {FD}<"$T_D0/badfile"
+rm -f "$T_D1/badfile"
 echo "mount 0 contents after mount 1 rm: $(cat <&$FD)"
 check_ino_index "$ino" "$dseq" "$T_M0"
 check_ino_index "$ino" "$dseq" "$T_M1"
 exec {FD}>&- # close
 # we know that revalidating will unhash the remote dentry
-stat "$T_D0/file" 2>&1 | sed 's/cannot statx/cannot stat/' | t_filter_fs
+stat "$T_D0/badfile" 2>&1 | sed 's/cannot statx/cannot stat/' | t_filter_fs
+t_force_log_merge
+# wait for orphan scanners to pick up the unlinked inode and become idle
+t_wait_for_no_orphans
 check_ino_index "$ino" "$dseq" "$T_M0"
 check_ino_index "$ino" "$dseq" "$T_M1"
@@ -83,16 +93,20 @@ rm -f "$T_D0/dir"/files-*
 rmdir "$T_D0/dir"
 
 echo "== open files survive remote scanning orphans"
-echo "contents" > "$T_D0/file"
-ino=$(stat -c "%i" "$T_D0/file")
-dseq=$(scoutfs stat -s data_seq "$T_D0/file")
-exec {FD}<"$T_D0/file"
-rm -f "$T_D0/file"
+echo "contents" > "$T_D0/lastfile"
+ino=$(stat -c "%i" "$T_D0/lastfile")
+dseq=$(scoutfs stat -s data_seq "$T_D0/lastfile")
+exec {FD}<"$T_D0/lastfile"
+rm -f "$T_D0/lastfile"
 t_umount 1
 t_mount 1
 echo "mount 0 contents after mount 1 remounted: $(cat <&$FD)"
 exec {FD}>&- # close
+t_force_log_merge
+t_wait_for_no_orphans
 check_ino_index "$ino" "$dseq" "$T_M0"
 check_ino_index "$ino" "$dseq" "$T_M1"
 
+t_restore_all_sysfs_mount_options orphan_scan_delay_ms
+
 t_pass
@@ -5,7 +5,7 @@
 t_require_commands mmap_stress mmap_validate scoutfs xfs_io
 
 echo "== mmap_stress"
-mmap_stress 8192 2000 "$T_D0/mmap_stress" "$T_D1/mmap_stress" "$T_D2/mmap_stress" "$T_D3/mmap_stress" "$T_D4/mmap_stress" | sed 's/:.*//g' | sort
+mmap_stress 8192 30 "$T_D0/mmap_stress" "$T_D0/mmap_stress" "$T_D0/mmap_stress" "$T_D3/mmap_stress" "$T_D3/mmap_stress" | sed 's/:.*//g' | sort
 
 echo "== basic mmap/read/write consistency checks"
 mmap_validate 256 1000 "$T_D0/mmap_val1" "$T_D1/mmap_val1"
@@ -62,7 +62,7 @@ test_timeout()
 	sleep 1
 
 	# tear down the current server/leader
-	t_force_umount $sv
+	t_force_umount $sv &
 
 	# see how long it takes for the next leader to start
 	start=$(time_ms)
@@ -73,6 +73,7 @@
 	echo "to $to delay $delay" >> $T_TMP.delay
 
 	# restore the mount that we tore down
+	wait
 	t_mount $sv
 
 	# make sure the new leader delay was reasonable, allowing for some slack
@@ -8,19 +8,19 @@ t_require_mounts 2
 echo "=== renameat2 noreplace flag test"
 
 # give each mount their own dir (lock group) to minimize create contention
-mkdir $T_M0/dir0
-mkdir $T_M1/dir1
+mkdir $T_D0/dir0
+mkdir $T_D1/dir1
 
 echo "=== run two asynchronous calls to renameat2 NOREPLACE"
 for i in $(seq 0 100); do
 	# prepare inputs in isolation
-	touch "$T_M0/dir0/old0"
-	touch "$T_M1/dir1/old1"
+	touch "$T_D0/dir0/old0"
+	touch "$T_D1/dir1/old1"
 
 	# race doing noreplace renames, both can't succeed
-	dumb_renameat2 -n "$T_M0/dir0/old0" "$T_M0/dir0/sharednew" 2> /dev/null &
+	dumb_renameat2 -n "$T_D0/dir0/old0" "$T_D0/dir0/sharednew" 2> /dev/null &
 	pid0=$!
-	dumb_renameat2 -n "$T_M1/dir1/old1" "$T_M1/dir0/sharednew" 2> /dev/null &
+	dumb_renameat2 -n "$T_D1/dir1/old1" "$T_D1/dir0/sharednew" 2> /dev/null &
 	pid1=$!
 
 	wait $pid0
@@ -31,7 +31,7 @@ for i in $(seq 0 100); do
 	test "$rc0" == 0 -a "$rc1" == 0 && t_fail "both renames succeeded"
 
 	# blow away possible files for either race outcome
-	rm -f "$T_M0/dir0/old0" "$T_M1/dir1/old1" "$T_M0/dir0/sharednew" "$T_M1/dir1/sharednew"
+	rm -f "$T_D0/dir0/old0" "$T_D1/dir1/old1" "$T_D0/dir0/sharednew" "$T_D1/dir1/sharednew"
 done
 
 t_pass
@@ -62,31 +62,27 @@ test -x "$SCOUTFS_FENCED_RUN" || \
 # files disappear.
 #
 
-# generate failure messages to stderr while still echoing 0 for the caller
-careful_cat()
+# silence error messages
+quiet_cat()
 {
-	local path="$@"
-
-	cat "$@" || echo 0
+	cat "$@" 2>/dev/null
 }
 
 while sleep $SCOUTFS_FENCED_DELAY; do
+	shopt -s nullglob
 	for fence in /sys/fs/scoutfs/*/fence/*; do
-		# catches unmatched regex when no dirs
-		if [ ! -d "$fence" ]; then
-			continue
-		fi
-
-		# skip requests that have been handled
-		if [ "$(careful_cat $fence/fenced)" == 1 -o \
-		     "$(careful_cat $fence/error)" == 1 ]; then
-			continue
-		fi
-
 		srv=$(basename $(dirname $(dirname $fence)))
-		rid="$(cat $fence/rid)"
-		ip="$(cat $fence/ipv4_addr)"
-		reason="$(cat $fence/reason)"
+		fenced="$(quiet_cat $fence/fenced)"
+		error="$(quiet_cat $fence/error)"
+		rid="$(quiet_cat $fence/rid)"
+		ip="$(quiet_cat $fence/ipv4_addr)"
+		reason="$(quiet_cat $fence/reason)"
+
+		# request dirs can linger then disappear after fenced/error is set
+		if [ ! -d "$fence" -o "$fenced" == "1" -o "$error" == "1" ]; then
+			continue
+		fi
 
 		log_message "server $srv fencing rid $rid at IP $ip for $reason"
@@ -55,6 +55,30 @@ with initial sparse regions (perhaps by multiple threads writing to
 different regions) and wasted space isn't an issue (perhaps because the
 file population contains few small files).
 .TP
+.B ino_alloc_per_lock=<number>
+This option determines how many inode numbers are allocated under each
+cluster lock. The default, and maximum, is 1024. The minimum is 1.
+Allocating fewer inodes per lock can allow more parallelism between
+mounts because more locks cover the same number of created files. This
+can be helpful when working with smaller numbers of large files.
+.TP
+.B lock_idle_count=<number>
+This option sets the number of locks that the client will allow to
+remain idle after being granted. If the number of locks exceeds this
+count then the client will try to free the oldest locks. This setting
+is per-mount and only changes the behavior of that mount.
+.sp
+Idle locks are not reclaimed by memory pressure, so this option bounds
+how much memory is likely to be pinned by allocated idle locks. Setting
+it too low can increase the latency of operations, as repeated use of a
+working set of locks has to request the locks from the network rather
+than using granted idle locks.
+.sp
+The count is not strictly enforced. Operations are allowed to use locks
+while over the limit to avoid deadlocks under heavy concurrent load.
+Exceeding the count only attempts freeing of idle locks.
+.TP
 .B log_merge_wait_timeout_ms=<number>
 This option sets the amount of time, in milliseconds, that log merge
 creation can wait before timing out. This setting is per-mount, only
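An illustrative mount line for the two options above (not from the page;
the device path and any other required mount options are elided
placeholders):

    mount -t scoutfs -o ino_alloc_per_lock=64,lock_idle_count=100000 <device> /mnt/scoutfs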