mirror of
https://github.com/versity/scoutfs.git
synced 2026-05-03 11:25:43 +00:00
Compare commits
60 Commits
clk/cluste
...
auke/ipv6
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9fe5e895a4 | ||
|
|
132d73d435 | ||
|
|
d2bb5c6cba | ||
|
|
1031e71b19 | ||
|
|
90bd7f9f43 | ||
|
|
3a05c69643 | ||
|
|
533f309aec | ||
|
|
0ef22b3c44 | ||
|
|
85ffba5329 | ||
|
|
553e6e909e | ||
|
|
9b569415f2 | ||
|
|
6a1e136085 | ||
|
|
7ca789c837 | ||
|
|
4d55fe6251 | ||
|
|
8f896d9783 | ||
|
|
e54f8d3ec0 | ||
|
|
d89e16214d | ||
|
|
b468352254 | ||
|
|
0eb9dfebdc | ||
|
|
f5750de244 | ||
|
|
f0c7996612 | ||
|
|
5143927e07 | ||
|
|
f495f52ec9 | ||
|
|
3dafeaac5b | ||
|
|
ef0f6f8ac2 | ||
|
|
c0cd29aa1b | ||
|
|
50bff13f21 | ||
|
|
de70ca2372 | ||
|
|
5af1412d5f | ||
|
|
0a2b2ad409 | ||
|
|
6c4590a8a0 | ||
|
|
1768f69c3c | ||
|
|
dcb0fd5805 | ||
|
|
660f874488 | ||
|
|
e1a6689a9b | ||
|
|
2884a92408 | ||
|
|
e194714004 | ||
|
|
8bb2f83cf9 | ||
|
|
6a9a6789d5 | ||
|
|
ee630b164f | ||
|
|
1c7678b6f5 | ||
|
|
22b5e79bbd | ||
|
|
259e639271 | ||
|
|
4d66c38c71 | ||
|
|
7ef62894bd | ||
|
|
1f363a1ead | ||
|
|
8ddf9b8c8c | ||
|
|
fd80c17ab6 | ||
|
|
991e2cbdf8 | ||
|
|
92ac132873 | ||
|
|
ad078cd93c | ||
|
|
90cb458cd5 | ||
|
|
1ab798e7eb | ||
|
|
e182914e51 | ||
|
|
8484a58dd6 | ||
|
|
a077104531 | ||
|
|
23aaa994df | ||
|
|
7d14b57b2d | ||
|
|
3f252be4be | ||
|
|
a4d25d9b55 |
@@ -1,6 +1,74 @@
|
||||
Versity ScoutFS Release Notes
|
||||
=============================
|
||||
|
||||
---
|
||||
v1.27
|
||||
\
|
||||
*Jan 15, 2026*
|
||||
|
||||
Switch away from using the general VM cache reclaim machinery to reduce
|
||||
idle cluster locks in the client. The VM treated locks like a cache and
|
||||
let many accumulate, presuming that it would be efficient to free them
|
||||
in batches. Lock freeing requires network communication so this could
|
||||
result in enormous backlogs in network messages (on the order of
|
||||
hundreds of thousands) and could result in significant delays of other
|
||||
network messaging.
|
||||
|
||||
Fix inefficient network receive processing while many messages are in
|
||||
the send queue. This consumed sufficient CPU to cause significant
|
||||
stalls, perhaps resulting in hung task warning messages due to delayed
|
||||
lock message delivery.
|
||||
|
||||
Fix a server livelock case that could happen while committing client
|
||||
transactions that contain a large amount of freed file data extents.
|
||||
This would present as client tasks hanging and a server task spinning
|
||||
consuming cpu.
|
||||
|
||||
Fix a rare server request processing failure that doesn't deal with
|
||||
retransmission of a request that a previous server partially processed.
|
||||
This would present as hung client tasks and repeated "error -2
|
||||
committing log merge: getting merge status item" kernel messages.
|
||||
|
||||
Fix an unnecessary server shutdown during specific circumstances in
|
||||
client lock recovery. The shutdown was due to server state and was
|
||||
ultimately harmless. The next server that started up would proceed
|
||||
accordingly.
|
||||
|
||||
---
|
||||
v1.26
|
||||
\
|
||||
*Nov 17, 2025*
|
||||
|
||||
Add the ino\_alloc\_per\_lock mount option. This changes the number of
|
||||
inode numbers allocated under each cluster lock and can alleviate lock
|
||||
contention for some patterns of larger file creation.
|
||||
|
||||
Add the tcp\_keepalive\_timeout\_ms mount option. This can enable the
|
||||
system to survive longer periods of networking outages.
|
||||
|
||||
Fix a rare double free of internal btree metadata blocks when merging
|
||||
log trees. The duplicated freed metadata block numbers would cause
|
||||
persistent errors in the server, preventing the server from starting and
|
||||
hanging the system.
|
||||
|
||||
Fix the data\_wait interface to not require the correct data\_version of
|
||||
the inode when raising an error. This lets callers raise errors when
|
||||
they're unable to recall the details of the inode to discover its
|
||||
data\_version.
|
||||
|
||||
Change scoutfs to more aggressively reclaim cached memory when under
|
||||
memory pressure. This makes scoutfs behave more like other kernel
|
||||
components and it integrates better with the reclaim policy heuristics
|
||||
in the VM core of the kernel.
|
||||
|
||||
Change scoutfs to more efficiently transmit and receive socket messages.
|
||||
Under heavy load this can process messages sufficiently more quickly to
|
||||
avoid hung task messages for tasks that were waiting for cluster lock
|
||||
messages to be processed.
|
||||
|
||||
Fix faulty server block commit budget calculations that were generating
|
||||
spurious "holders exceeded alloc budget" console messages.
|
||||
|
||||
---
|
||||
v1.25
|
||||
\
|
||||
|
||||
@@ -479,10 +479,20 @@ ifneq (,$(shell grep '^unsigned int stack_trace_save' include/linux/stacktrace.h
|
||||
ccflags-y += -DKC_STACK_TRACE_SAVE
|
||||
endif
|
||||
|
||||
# v6.1-rc1-4-g7420332a6ff4
|
||||
#
|
||||
# .get_acl() method now has dentry arg (and mnt_idmap). The old get_acl has been renamed
|
||||
# to get_inode_acl() and is still available as well, but has an extra rcu param.
|
||||
ifneq (,$(shell grep 'struct posix_acl ...get_acl..struct mnt_idmap ., struct dentry' include/linux/fs.h))
|
||||
ccflags-y += -DKC_GET_ACL_DENTRY
|
||||
# v6.1-rc1-2-g138060ba92b3
|
||||
#
|
||||
# set_acl now passed a struct dentry instead of inode.
|
||||
#
|
||||
ifneq (,$(shell grep 'int ..set_acl.*struct dentry' include/linux/fs.h))
|
||||
ccflags-y += -DKC_SET_ACL_DENTRY
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.1-rc1-3-gcac2f8b8d8b5
|
||||
#
|
||||
# get_acl renamed to get_inode_acl.
|
||||
#
|
||||
ifneq (,$(shell grep 'struct posix_acl.*get_inode_acl' include/linux/fs.h))
|
||||
ccflags-y += -DKC_GET_INODE_ACL
|
||||
endif
|
||||
|
||||
@@ -107,20 +107,22 @@ struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct s
|
||||
return acl;
|
||||
}
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, int type)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type, bool rcu)
|
||||
#else
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
#endif
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
struct posix_acl *acl;
|
||||
int ret;
|
||||
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
if (rcu)
|
||||
return ERR_PTR(-ECHILD);
|
||||
#endif
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
@@ -208,7 +210,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
int scoutfs_set_acl(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, struct posix_acl *acl, int type)
|
||||
{
|
||||
@@ -254,9 +256,8 @@ int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value,
|
||||
if (!IS_POSIXACL(dentry->d_inode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
acl = scoutfs_get_acl(KC_VFS_INIT_NS
|
||||
dentry, type);
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
acl = scoutfs_get_acl(dentry->d_inode, type, false);
|
||||
#else
|
||||
acl = scoutfs_get_acl(dentry->d_inode, type);
|
||||
#endif
|
||||
@@ -305,7 +306,7 @@ int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *v
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
ret = scoutfs_set_acl(KC_VFS_INIT_NS dentry, acl, type);
|
||||
#else
|
||||
ret = scoutfs_set_acl(dentry->d_inode, acl, type);
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
#ifndef _SCOUTFS_ACL_H_
|
||||
#define _SCOUTFS_ACL_H_
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF struct dentry *dentry, int type);
|
||||
int scoutfs_set_acl(KC_VFS_NS_DEF struct dentry *dentry, struct posix_acl *acl, int type);
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
int scoutfs_set_acl(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, struct posix_acl *acl, int type);
|
||||
#else
|
||||
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
||||
#endif
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type, bool rcu);
|
||||
#else
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type);
|
||||
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
||||
#endif
|
||||
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
|
||||
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
|
||||
|
||||
@@ -479,7 +479,7 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_mount_options opts;
|
||||
struct scoutfs_net_greeting greet;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage sin;
|
||||
bool am_quorum;
|
||||
int ret;
|
||||
|
||||
|
||||
@@ -125,7 +125,6 @@
|
||||
EXPAND_COUNTER(item_update) \
|
||||
EXPAND_COUNTER(item_write_dirty) \
|
||||
EXPAND_COUNTER(lock_alloc) \
|
||||
EXPAND_COUNTER(lock_count_objects) \
|
||||
EXPAND_COUNTER(lock_free) \
|
||||
EXPAND_COUNTER(lock_grant_request) \
|
||||
EXPAND_COUNTER(lock_grant_response) \
|
||||
@@ -139,13 +138,13 @@
|
||||
EXPAND_COUNTER(lock_lock_error) \
|
||||
EXPAND_COUNTER(lock_nonblock_eagain) \
|
||||
EXPAND_COUNTER(lock_recover_request) \
|
||||
EXPAND_COUNTER(lock_scan_objects) \
|
||||
EXPAND_COUNTER(lock_shrink_attempted) \
|
||||
EXPAND_COUNTER(lock_shrink_aborted) \
|
||||
EXPAND_COUNTER(lock_shrink_work) \
|
||||
EXPAND_COUNTER(lock_shrink_request_failed) \
|
||||
EXPAND_COUNTER(lock_unlock) \
|
||||
EXPAND_COUNTER(lock_wait) \
|
||||
EXPAND_COUNTER(log_merge_complete) \
|
||||
EXPAND_COUNTER(log_merge_no_finalized) \
|
||||
EXPAND_COUNTER(log_merge_start) \
|
||||
EXPAND_COUNTER(log_merge_wait_timeout) \
|
||||
EXPAND_COUNTER(net_dropped_response) \
|
||||
EXPAND_COUNTER(net_send_bytes) \
|
||||
@@ -160,6 +159,7 @@
|
||||
EXPAND_COUNTER(orphan_scan) \
|
||||
EXPAND_COUNTER(orphan_scan_attempts) \
|
||||
EXPAND_COUNTER(orphan_scan_cached) \
|
||||
EXPAND_COUNTER(orphan_scan_empty) \
|
||||
EXPAND_COUNTER(orphan_scan_error) \
|
||||
EXPAND_COUNTER(orphan_scan_item) \
|
||||
EXPAND_COUNTER(orphan_scan_omap_set) \
|
||||
|
||||
@@ -2006,7 +2006,11 @@ const struct inode_operations scoutfs_symlink_iops = {
|
||||
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
.removexattr = generic_removexattr,
|
||||
#endif
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
.get_inode_acl = scoutfs_get_acl,
|
||||
#else
|
||||
.get_acl = scoutfs_get_acl,
|
||||
#endif
|
||||
#ifndef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
.tmpfile = scoutfs_tmpfile,
|
||||
.rename = scoutfs_rename_common,
|
||||
@@ -2052,8 +2056,12 @@ const struct inode_operations scoutfs_dir_iops = {
|
||||
.removexattr = generic_removexattr,
|
||||
#endif
|
||||
.listxattr = scoutfs_listxattr,
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
.get_inode_acl = scoutfs_get_acl,
|
||||
#else
|
||||
.get_acl = scoutfs_get_acl,
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
#endif
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
.set_acl = scoutfs_set_acl,
|
||||
#endif
|
||||
.symlink = scoutfs_symlink,
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
#include "sysfs.h"
|
||||
#include "server.h"
|
||||
#include "fence.h"
|
||||
#include "net.h"
|
||||
|
||||
/*
|
||||
* Fencing ensures that a given mount can no longer write to the
|
||||
@@ -79,7 +80,7 @@ struct pending_fence {
|
||||
struct timer_list timer;
|
||||
|
||||
ktime_t start_kt;
|
||||
__be32 ipv4_addr;
|
||||
union scoutfs_inet_addr addr;
|
||||
bool fenced;
|
||||
bool error;
|
||||
int reason;
|
||||
@@ -171,14 +172,19 @@ static ssize_t error_store(struct kobject *kobj, struct kobj_attribute *attr, co
|
||||
}
|
||||
SCOUTFS_ATTR_RW(error);
|
||||
|
||||
static ssize_t ipv4_addr_show(struct kobject *kobj,
|
||||
static ssize_t inet_addr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
struct sockaddr_storage sin;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%pI4", &fence->ipv4_addr);
|
||||
memset(&sin, 0, sizeof(struct sockaddr_storage));
|
||||
|
||||
scoutfs_addr_to_sin(&sin, &fence->addr);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%pISc", SIN_ARG(&sin));
|
||||
}
|
||||
SCOUTFS_ATTR_RO(ipv4_addr);
|
||||
SCOUTFS_ATTR_RO(inet_addr);
|
||||
|
||||
static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
@@ -212,7 +218,7 @@ static struct attribute *fence_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(elapsed_secs),
|
||||
SCOUTFS_ATTR_PTR(fenced),
|
||||
SCOUTFS_ATTR_PTR(error),
|
||||
SCOUTFS_ATTR_PTR(ipv4_addr),
|
||||
SCOUTFS_ATTR_PTR(inet_addr),
|
||||
SCOUTFS_ATTR_PTR(reason),
|
||||
SCOUTFS_ATTR_PTR(rid),
|
||||
NULL,
|
||||
@@ -232,7 +238,7 @@ static void fence_timeout(struct timer_list *timer)
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason)
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, union scoutfs_inet_addr *addr, int reason)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
@@ -248,7 +254,7 @@ int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int r
|
||||
scoutfs_sysfs_init_attrs(sb, &fence->ssa);
|
||||
|
||||
fence->start_kt = ktime_get();
|
||||
fence->ipv4_addr = ipv4_addr;
|
||||
memcpy(&fence->addr, addr, sizeof(union scoutfs_inet_addr));
|
||||
fence->fenced = false;
|
||||
fence->error = false;
|
||||
fence->reason = reason;
|
||||
|
||||
@@ -7,7 +7,7 @@ enum {
|
||||
SCOUTFS_FENCE_QUORUM_BLOCK_LEADER,
|
||||
};
|
||||
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason);
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, union scoutfs_inet_addr *addr, int reason);
|
||||
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error);
|
||||
int scoutfs_fence_reason_pending(struct super_block *sb, int reason);
|
||||
int scoutfs_fence_free(struct super_block *sb, u64 rid);
|
||||
|
||||
@@ -149,8 +149,12 @@ static const struct inode_operations scoutfs_file_iops = {
|
||||
.removexattr = generic_removexattr,
|
||||
#endif
|
||||
.listxattr = scoutfs_listxattr,
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
.get_inode_acl = scoutfs_get_acl,
|
||||
#else
|
||||
.get_acl = scoutfs_get_acl,
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
#endif
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
.set_acl = scoutfs_set_acl,
|
||||
#endif
|
||||
.fiemap = scoutfs_data_fiemap,
|
||||
@@ -165,8 +169,12 @@ static const struct inode_operations scoutfs_special_iops = {
|
||||
.removexattr = generic_removexattr,
|
||||
#endif
|
||||
.listxattr = scoutfs_listxattr,
|
||||
#ifdef KC_GET_INODE_ACL
|
||||
.get_inode_acl = scoutfs_get_acl,
|
||||
#else
|
||||
.get_acl = scoutfs_get_acl,
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
#endif
|
||||
#ifdef KC_SET_ACL_DENTRY
|
||||
.set_acl = scoutfs_set_acl,
|
||||
#endif
|
||||
};
|
||||
@@ -482,7 +490,7 @@ int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock)
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're changing the file size then the contents of the file are
|
||||
* If we're changing the file size than the contents of the file are
|
||||
* changing and we increment the data_version. This would prevent
|
||||
* staging because the data_version is per-inode today, not per-extent.
|
||||
* So if there are any offline extents within the new size then we need
|
||||
@@ -1482,12 +1490,6 @@ static int remove_index_items(struct super_block *sb, u64 ino,
|
||||
* Return an allocated and unused inode number. Returns -ENOSPC if
|
||||
* we're out of inodes.
|
||||
*
|
||||
* Each parent directory has its own pool of free inode numbers. Items
|
||||
* are sorted by their inode numbers as they're stored in segments.
|
||||
* This will tend to group together files that are created in a
|
||||
* directory at the same time in segments. Concurrent creation across
|
||||
* different directories will be stored in their own regions.
|
||||
*
|
||||
* Inode numbers are never reclaimed. If the inode is evicted or we're
|
||||
* unmounted the pending inode numbers will be lost. Asking for a
|
||||
* relatively small number from the server each time will tend to
|
||||
@@ -1497,12 +1499,18 @@ static int remove_index_items(struct super_block *sb, u64 ino,
|
||||
int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret)
|
||||
{
|
||||
DECLARE_INODE_SB_INFO(sb, inf);
|
||||
struct scoutfs_mount_options opts;
|
||||
struct inode_allocator *ia;
|
||||
u64 ino;
|
||||
u64 nr;
|
||||
int ret;
|
||||
|
||||
ia = is_dir ? &inf->dir_ino_alloc : &inf->ino_alloc;
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
if (is_dir && opts.ino_alloc_per_lock == SCOUTFS_LOCK_INODE_GROUP_NR)
|
||||
ia = &inf->dir_ino_alloc;
|
||||
else
|
||||
ia = &inf->ino_alloc;
|
||||
|
||||
spin_lock(&ia->lock);
|
||||
|
||||
@@ -1523,6 +1531,17 @@ int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret)
|
||||
*ino_ret = ia->ino++;
|
||||
ia->nr--;
|
||||
|
||||
if (opts.ino_alloc_per_lock != SCOUTFS_LOCK_INODE_GROUP_NR) {
|
||||
nr = ia->ino & SCOUTFS_LOCK_INODE_GROUP_MASK;
|
||||
if (nr >= opts.ino_alloc_per_lock) {
|
||||
nr = SCOUTFS_LOCK_INODE_GROUP_NR - nr;
|
||||
if (nr > ia->nr)
|
||||
nr = ia->nr;
|
||||
ia->ino += nr;
|
||||
ia->nr -= nr;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&ia->lock);
|
||||
ret = 0;
|
||||
out:
|
||||
@@ -1626,10 +1645,14 @@ int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_
|
||||
struct scoutfs_lock *primary)
|
||||
{
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
init_orphan_key(&key, ino);
|
||||
|
||||
return scoutfs_item_delete_force(sb, &key, lock, primary);
|
||||
ret = scoutfs_item_delete_force(sb, &key, lock, primary);
|
||||
trace_scoutfs_inode_orphan_delete(sb, ino, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1711,6 +1734,8 @@ out:
|
||||
scoutfs_release_trans(sb);
|
||||
scoutfs_inode_index_unlock(sb, &ind_locks);
|
||||
|
||||
trace_scoutfs_delete_inode_end(sb, ino, mode, size, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1806,6 +1831,9 @@ out:
|
||||
* they've checked that the inode could really be deleted. We serialize
|
||||
* on a bit in the lock data so that we only have one deletion attempt
|
||||
* per inode under this mount's cluster lock.
|
||||
*
|
||||
* Returns -EAGAIN if we either did some cleanup work or are unable to finish
|
||||
* cleaning up this inode right now.
|
||||
*/
|
||||
static int try_delete_inode_items(struct super_block *sb, u64 ino)
|
||||
{
|
||||
@@ -1819,6 +1847,8 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
|
||||
int bit_nr;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_try_delete(sb, ino);
|
||||
|
||||
ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -1831,27 +1861,32 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
|
||||
|
||||
/* only one local attempt per inode at a time */
|
||||
if (test_and_set_bit(bit_nr, ldata->trying)) {
|
||||
ret = 0;
|
||||
trace_scoutfs_try_delete_local_busy(sb, ino);
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
clear_trying = true;
|
||||
|
||||
/* can't delete if it's cached in local or remote mounts */
|
||||
if (scoutfs_omap_test(sb, ino) || test_bit_le(bit_nr, ldata->map.bits)) {
|
||||
ret = 0;
|
||||
trace_scoutfs_try_delete_cached(sb, ino);
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
scoutfs_inode_init_key(&key, ino);
|
||||
ret = lookup_inode_item(sb, &key, &sinode, lock);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
if (ret == -ENOENT) {
|
||||
trace_scoutfs_try_delete_no_item(sb, ino);
|
||||
ret = 0;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (le32_to_cpu(sinode.nlink) > 0) {
|
||||
ret = 0;
|
||||
trace_scoutfs_try_delete_has_links(sb, ino, le32_to_cpu(sinode.nlink));
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1860,8 +1895,10 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
|
||||
goto out;
|
||||
|
||||
ret = delete_inode_items(sb, ino, &sinode, lock, orph_lock);
|
||||
if (ret == 0)
|
||||
if (ret == 0) {
|
||||
ret = -EAGAIN;
|
||||
scoutfs_inc_counter(sb, inode_deleted);
|
||||
}
|
||||
|
||||
out:
|
||||
if (clear_trying)
|
||||
@@ -2063,6 +2100,10 @@ void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb)
|
||||
* a locally cached inode. Then we ask the server for the open map
|
||||
* containing the inode. Only if we don't see any cached users do we do
|
||||
* the expensive work of acquiring locks to try and delete the items.
|
||||
*
|
||||
* We need to track whether there is any orphan cleanup work remaining so
|
||||
* that tests such as inode-deletion can watch the orphan_scan_empty counter
|
||||
* to determine when inode cleanup from open-unlink scenarios is complete.
|
||||
*/
|
||||
static void inode_orphan_scan_worker(struct work_struct *work)
|
||||
{
|
||||
@@ -2074,11 +2115,14 @@ static void inode_orphan_scan_worker(struct work_struct *work)
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key last;
|
||||
struct scoutfs_key key;
|
||||
bool work_todo = false;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
u64 ino;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_orphan_scan_start(sb);
|
||||
|
||||
scoutfs_inc_counter(sb, orphan_scan);
|
||||
|
||||
init_orphan_key(&last, U64_MAX);
|
||||
@@ -2098,8 +2142,10 @@ static void inode_orphan_scan_worker(struct work_struct *work)
|
||||
init_orphan_key(&key, ino);
|
||||
ret = scoutfs_btree_next(sb, &roots.fs_root, &key, &iref);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
if (ret == -ENOENT) {
|
||||
trace_scoutfs_orphan_scan_work(sb, 0);
|
||||
break;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -2114,6 +2160,7 @@ static void inode_orphan_scan_worker(struct work_struct *work)
|
||||
|
||||
/* locally cached inodes will try to delete as they evict */
|
||||
if (scoutfs_omap_test(sb, ino)) {
|
||||
work_todo = true;
|
||||
scoutfs_inc_counter(sb, orphan_scan_cached);
|
||||
continue;
|
||||
}
|
||||
@@ -2129,13 +2176,22 @@ static void inode_orphan_scan_worker(struct work_struct *work)
|
||||
|
||||
/* remote cached inodes will also try to delete */
|
||||
if (test_bit_le(bit_nr, omap.bits)) {
|
||||
work_todo = true;
|
||||
scoutfs_inc_counter(sb, orphan_scan_omap_set);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* seemingly orphaned and unused, get locks and check for sure */
|
||||
scoutfs_inc_counter(sb, orphan_scan_attempts);
|
||||
trace_scoutfs_orphan_scan_work(sb, ino);
|
||||
|
||||
ret = try_delete_inode_items(sb, ino);
|
||||
if (ret == -EAGAIN) {
|
||||
work_todo = true;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
trace_scoutfs_orphan_scan_end(sb, ino, ret);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
@@ -2144,6 +2200,11 @@ out:
|
||||
if (ret < 0)
|
||||
scoutfs_inc_counter(sb, orphan_scan_error);
|
||||
|
||||
if (!work_todo)
|
||||
scoutfs_inc_counter(sb, orphan_scan_empty);
|
||||
|
||||
trace_scoutfs_orphan_scan_stop(sb, work_todo);
|
||||
|
||||
scoutfs_inode_schedule_orphan_dwork(sb);
|
||||
}
|
||||
|
||||
|
||||
@@ -441,8 +441,6 @@ static long scoutfs_ioc_data_wait_err(struct file *file, unsigned long arg)
|
||||
|
||||
if (!S_ISREG(inode->i_mode)) {
|
||||
ret = -EINVAL;
|
||||
} else if (scoutfs_inode_data_version(inode) != args.data_version) {
|
||||
ret = -ESTALE;
|
||||
} else {
|
||||
ret = scoutfs_data_wait_err(inode, sblock, eblock, args.op,
|
||||
args.err);
|
||||
|
||||
@@ -366,10 +366,15 @@ struct scoutfs_ioctl_statfs_more {
|
||||
*
|
||||
* Find current waiters that match the inode, op, and block range to wake
|
||||
* up and return an error.
|
||||
*
|
||||
* (*) ca. v1.25 and earlier required that the data_version passed match
|
||||
* that of the waiter, but this check is removed. It was never needed
|
||||
* because no data is modified during this ioctl. Any data_version value
|
||||
* here has been ignored since then.
|
||||
*/
|
||||
struct scoutfs_ioctl_data_wait_err {
|
||||
__u64 ino;
|
||||
__u64 data_version;
|
||||
__u64 data_version; /* Ignored, see above (*) */
|
||||
__u64 offset;
|
||||
__u64 count;
|
||||
__u64 op;
|
||||
|
||||
@@ -195,9 +195,11 @@ struct kc_shrinker_wrapper {
|
||||
#include <linux/inet.h>
|
||||
static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *addr)
|
||||
{
|
||||
int addrlen = sizeof(struct sockaddr_in);
|
||||
int addrlen = sizeof(struct sockaddr_storage);
|
||||
int ret = kernel_getsockname(sock, addr, &addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
if (ret == 0 && (!(
|
||||
(addrlen == sizeof(struct sockaddr_in)) ||
|
||||
(addrlen == sizeof(struct sockaddr_in6)))))
|
||||
return -EAFNOSUPPORT;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
@@ -206,9 +208,11 @@ static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *ad
|
||||
}
|
||||
static inline int kc_kernel_getpeername(struct socket *sock, struct sockaddr *addr)
|
||||
{
|
||||
int addrlen = sizeof(struct sockaddr_in);
|
||||
int addrlen = sizeof(struct sockaddr_storage);
|
||||
int ret = kernel_getpeername(sock, addr, &addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
if (ret == 0 && (!(
|
||||
(addrlen == sizeof(struct sockaddr_in)) ||
|
||||
(addrlen == sizeof(struct sockaddr_in6)))))
|
||||
return -EAFNOSUPPORT;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
|
||||
955
kmod/src/lock.c
955
kmod/src/lock.c
File diff suppressed because it is too large
Load Diff
@@ -1,8 +1,6 @@
|
||||
#ifndef _SCOUTFS_LOCK_H_
|
||||
#define _SCOUTFS_LOCK_H_
|
||||
|
||||
#include <linux/rhashtable.h>
|
||||
|
||||
#include "key.h"
|
||||
#include "tseq.h"
|
||||
|
||||
@@ -21,24 +19,20 @@ struct inode_deletion_lock_data;
|
||||
*/
|
||||
struct scoutfs_lock {
|
||||
struct super_block *sb;
|
||||
atomic_t refcount;
|
||||
spinlock_t lock;
|
||||
struct rcu_head rcu_head;
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
struct rhash_head ht_head;
|
||||
struct rb_node node;
|
||||
struct rb_node range_node;
|
||||
u64 refresh_gen;
|
||||
u64 write_seq;
|
||||
u64 dirty_trans_seq;
|
||||
struct list_head lru_head;
|
||||
int lru_on_list;
|
||||
wait_queue_head_t waitq;
|
||||
unsigned long request_pending:1,
|
||||
invalidate_pending:1;
|
||||
|
||||
struct list_head inv_head; /* entry in linfo's list of locks with invalidations */
|
||||
struct list_head inv_req_list; /* list of lock's invalidation requests */
|
||||
struct list_head inv_list; /* list of lock's invalidation requests */
|
||||
struct list_head shrink_head;
|
||||
|
||||
spinlock_t cov_list_lock;
|
||||
|
||||
@@ -506,6 +506,19 @@ out:
|
||||
* because we don't know which locks they'll hold. Once recover
|
||||
* finishes the server calls us to kick all the locks that were waiting
|
||||
* during recovery.
|
||||
*
|
||||
* The calling server shuts down if we return errors indicating that we
|
||||
* weren't able to ensure forward progress in the lock state machine.
|
||||
*
|
||||
* Failure to send to a disconnected client is not a fatal error.
|
||||
* During normal disconnection the client's state is removed before
|
||||
* their connection is destroyed. We can't use state to try and send to
|
||||
* a non-existing connection. But a client that fails to reconnect is
|
||||
* disconnected before being fenced. If we have multiple disconnected
|
||||
* clients we can try to send to one while cleaning up another. If
|
||||
* they've uncleanly disconnected their locks are going to be removed
|
||||
* and the lock can make forward progress again. Or we'll shutdown for
|
||||
* failure to fence.
|
||||
*/
|
||||
static int process_waiting_requests(struct super_block *sb,
|
||||
struct server_lock_node *snode)
|
||||
@@ -597,6 +610,10 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
out:
|
||||
put_server_lock(inf, snode);
|
||||
|
||||
/* disconnected clients will be fenced, trying to send to them isn't fatal */
|
||||
if (ret == -ENOTCONN)
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,12 @@ do { \
|
||||
} \
|
||||
} while (0) \
|
||||
|
||||
#define scoutfs_bug_on_err(sb, err, fmt, args...) \
|
||||
do { \
|
||||
__typeof__(err) _err = (err); \
|
||||
scoutfs_bug_on(sb, _err < 0 && _err != -ENOLINK, fmt, ##args); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Each message is only generated once per volume. Remounting resets
|
||||
* the messages.
|
||||
|
||||
229
kmod/src/net.c
229
kmod/src/net.c
@@ -21,6 +21,7 @@
|
||||
#include <net/tcp.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
@@ -125,6 +126,7 @@ struct message_send {
|
||||
unsigned long dead:1;
|
||||
struct list_head head;
|
||||
scoutfs_net_response_t resp_func;
|
||||
struct rb_node node;
|
||||
void *resp_data;
|
||||
struct scoutfs_net_header nh;
|
||||
};
|
||||
@@ -161,49 +163,118 @@ static bool nh_is_request(struct scoutfs_net_header *nh)
|
||||
return !nh_is_response(nh);
|
||||
}
|
||||
|
||||
static int cmp_sorted_msend(u64 pos, struct message_send *msend)
|
||||
{
|
||||
if (nh_is_request(&msend->nh))
|
||||
return pos < le64_to_cpu(msend->nh.id) ? -1 :
|
||||
pos > le64_to_cpu(msend->nh.id) ? 1 : 0;
|
||||
else
|
||||
return pos < le64_to_cpu(msend->nh.seq) ? -1 :
|
||||
pos > le64_to_cpu(msend->nh.seq) ? 1 : 0;
|
||||
}
|
||||
|
||||
static struct message_send *search_sorted_msends(struct rb_root *root, u64 pos, struct rb_node *ins)
|
||||
{
|
||||
struct rb_node **node = &root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct message_send *msend = NULL;
|
||||
struct message_send *next = NULL;
|
||||
int cmp = -1;
|
||||
|
||||
while (*node) {
|
||||
parent = *node;
|
||||
msend = container_of(*node, struct message_send, node);
|
||||
|
||||
cmp = cmp_sorted_msend(pos, msend);
|
||||
if (cmp < 0) {
|
||||
next = msend;
|
||||
node = &(*node)->rb_left;
|
||||
} else if (cmp > 0) {
|
||||
node = &(*node)->rb_right;
|
||||
} else {
|
||||
next = msend;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
BUG_ON(cmp == 0 && ins);
|
||||
|
||||
if (ins) {
|
||||
rb_link_node(ins, parent, node);
|
||||
rb_insert_color(ins, root);
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
static struct message_send *next_sorted_msend(struct message_send *msend)
|
||||
{
|
||||
struct rb_node *node = rb_next(&msend->node);
|
||||
|
||||
return node ? rb_entry(node, struct message_send, node) : NULL;
|
||||
}
|
||||
|
||||
#define for_each_sorted_msend(MSEND_, TMP_, ROOT_, POS_) \
|
||||
for (MSEND_ = search_sorted_msends(ROOT_, POS_, NULL); \
|
||||
MSEND_ != NULL && ({ TMP_ = next_sorted_msend(MSEND_); true; }); \
|
||||
MSEND_ = TMP_)
|
||||
|
||||
static void insert_sorted_msend(struct scoutfs_net_connection *conn, struct message_send *msend)
|
||||
{
|
||||
BUG_ON(!RB_EMPTY_NODE(&msend->node));
|
||||
|
||||
if (nh_is_request(&msend->nh))
|
||||
search_sorted_msends(&conn->req_root, le64_to_cpu(msend->nh.id), &msend->node);
|
||||
else
|
||||
search_sorted_msends(&conn->resp_root, le64_to_cpu(msend->nh.seq), &msend->node);
|
||||
}
|
||||
|
||||
static void erase_sorted_msend(struct scoutfs_net_connection *conn, struct message_send *msend)
|
||||
{
|
||||
if (!RB_EMPTY_NODE(&msend->node)) {
|
||||
if (nh_is_request(&msend->nh))
|
||||
rb_erase(&msend->node, &conn->req_root);
|
||||
else
|
||||
rb_erase(&msend->node, &conn->resp_root);
|
||||
RB_CLEAR_NODE(&msend->node);
|
||||
}
|
||||
}
|
||||
|
||||
static void move_sorted_msends(struct scoutfs_net_connection *dst_conn, struct rb_root *dst_root,
|
||||
struct scoutfs_net_connection *src_conn, struct rb_root *src_root)
|
||||
{
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
|
||||
for_each_sorted_msend(msend, tmp, src_root, 0) {
|
||||
erase_sorted_msend(src_conn, msend);
|
||||
insert_sorted_msend(dst_conn, msend);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We return dead requests so that the caller can stop searching other
|
||||
* lists for the dead request that we found.
|
||||
* Pending requests are uniquely identified by the id they were assigned
|
||||
* as they were first put on the send queue.
|
||||
*/
|
||||
static struct message_send *search_list(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list,
|
||||
u8 cmd, u64 id)
|
||||
static struct message_send *find_request(struct scoutfs_net_connection *conn, u8 cmd, u64 id)
|
||||
{
|
||||
struct message_send *msend;
|
||||
|
||||
assert_spin_locked(&conn->lock);
|
||||
|
||||
list_for_each_entry(msend, list, head) {
|
||||
if (nh_is_request(&msend->nh) && msend->nh.cmd == cmd &&
|
||||
le64_to_cpu(msend->nh.id) == id)
|
||||
return msend;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find an active send request on the lists. It's almost certainly
|
||||
* waiting on the resend queue but it could be actively being sent.
|
||||
*/
|
||||
static struct message_send *find_request(struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id)
|
||||
{
|
||||
struct message_send *msend;
|
||||
|
||||
msend = search_list(conn, &conn->resend_queue, cmd, id) ?:
|
||||
search_list(conn, &conn->send_queue, cmd, id);
|
||||
if (msend && msend->dead)
|
||||
msend = search_sorted_msends(&conn->req_root, id, NULL);
|
||||
if (msend && !(msend->nh.cmd == cmd && le64_to_cpu(msend->nh.id) == id))
|
||||
msend = NULL;
|
||||
|
||||
return msend;
|
||||
}
|
||||
|
||||
/*
|
||||
* Complete a send message by moving it to the send queue and marking it
|
||||
* to be freed. It won't be visible to callers trying to find sends.
|
||||
* Free a send message by moving it to the send queue and marking it
|
||||
* dead. It is removed from the sorted rb roots so it won't be visible
|
||||
* as a request for response processing.
|
||||
*/
|
||||
static void complete_send(struct scoutfs_net_connection *conn,
|
||||
struct message_send *msend)
|
||||
static void queue_dead_free(struct scoutfs_net_connection *conn, struct message_send *msend)
|
||||
{
|
||||
assert_spin_locked(&conn->lock);
|
||||
|
||||
@@ -213,6 +284,7 @@ static void complete_send(struct scoutfs_net_connection *conn,
|
||||
|
||||
msend->dead = 1;
|
||||
list_move(&msend->head, &conn->send_queue);
|
||||
erase_sorted_msend(conn, msend);
|
||||
queue_work(conn->workq, &conn->send_work);
|
||||
}
|
||||
|
||||
@@ -370,6 +442,7 @@ static int submit_send(struct super_block *sb,
|
||||
msend->resp_func = resp_func;
|
||||
msend->resp_data = resp_data;
|
||||
msend->dead = 0;
|
||||
RB_CLEAR_NODE(&msend->node);
|
||||
|
||||
msend->nh.seq = cpu_to_le64(seq);
|
||||
msend->nh.recv_seq = 0; /* set when sent, not when queued */
|
||||
@@ -390,6 +463,7 @@ static int submit_send(struct super_block *sb,
|
||||
} else {
|
||||
list_add_tail(&msend->head, &conn->resend_queue);
|
||||
}
|
||||
insert_sorted_msend(conn, msend);
|
||||
|
||||
if (id_ret)
|
||||
*id_ret = le64_to_cpu(msend->nh.id);
|
||||
@@ -459,7 +533,7 @@ static int process_response(struct scoutfs_net_connection *conn,
|
||||
if (msend) {
|
||||
resp_func = msend->resp_func;
|
||||
resp_data = msend->resp_data;
|
||||
complete_send(conn, msend);
|
||||
queue_dead_free(conn, msend);
|
||||
} else {
|
||||
scoutfs_inc_counter(sb, net_dropped_response);
|
||||
}
|
||||
@@ -550,43 +624,21 @@ static void queue_ordered_proc(struct scoutfs_net_connection *conn, struct messa
|
||||
* Free live responses up to and including the seq by marking them dead
|
||||
* and moving them to the send queue to be freed.
|
||||
*/
|
||||
static bool move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list, u64 seq)
|
||||
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
|
||||
{
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
bool moved = false;
|
||||
|
||||
assert_spin_locked(&conn->lock);
|
||||
|
||||
list_for_each_entry_safe(msend, tmp, list, head) {
|
||||
if (le64_to_cpu(msend->nh.seq) > seq)
|
||||
break;
|
||||
if (!nh_is_response(&msend->nh) || msend->dead)
|
||||
continue;
|
||||
|
||||
msend->dead = 1;
|
||||
list_move(&msend->head, &conn->send_queue);
|
||||
moved = true;
|
||||
}
|
||||
|
||||
return moved;
|
||||
}
|
||||
|
||||
/* acks are processed inline in the recv worker */
|
||||
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
|
||||
{
|
||||
bool moved;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
moved = move_acked_responses(conn, &conn->send_queue, seq) |
|
||||
move_acked_responses(conn, &conn->resend_queue, seq);
|
||||
for_each_sorted_msend(msend, tmp, &conn->resp_root, 0) {
|
||||
if (le64_to_cpu(msend->nh.seq) > seq)
|
||||
break;
|
||||
|
||||
queue_dead_free(conn, msend);
|
||||
}
|
||||
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
if (moved)
|
||||
queue_work(conn->workq, &conn->send_work);
|
||||
}
|
||||
|
||||
static int k_recvmsg(struct socket *sock, void *buf, unsigned len)
|
||||
@@ -824,9 +876,11 @@ static int k_sendmsg_full(struct socket *sock, struct kvec *kv, unsigned long nr
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void free_msend(struct net_info *ninf, struct message_send *msend)
|
||||
static void free_msend(struct net_info *ninf, struct scoutfs_net_connection *conn,
|
||||
struct message_send *msend)
|
||||
{
|
||||
list_del_init(&msend->head);
|
||||
erase_sorted_msend(conn, msend);
|
||||
scoutfs_tseq_del(&ninf->msg_tseq_tree, &msend->tseq_entry);
|
||||
kfree(msend);
|
||||
}
|
||||
@@ -866,9 +920,10 @@ static void scoutfs_net_send_worker(struct work_struct *work)
|
||||
count = 0;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
|
||||
if (msend->dead) {
|
||||
free_msend(ninf, msend);
|
||||
free_msend(ninf, conn, msend);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -957,7 +1012,7 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
|
||||
|
||||
list_splice_init(&conn->resend_queue, &conn->send_queue);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head)
|
||||
free_msend(ninf, msend);
|
||||
free_msend(ninf, conn, msend);
|
||||
|
||||
/* accepted sockets are removed from their listener's list */
|
||||
if (conn->listening_conn) {
|
||||
@@ -1163,7 +1218,8 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
|
||||
|
||||
trace_scoutfs_net_connect_work_enter(sb, 0, 0);
|
||||
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(conn->connect_sin.ss_family,
|
||||
SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1184,7 +1240,9 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
|
||||
trace_scoutfs_conn_connect_start(conn);
|
||||
|
||||
ret = kernel_connect(sock, (struct sockaddr *)&conn->connect_sin,
|
||||
sizeof(struct sockaddr_in), 0);
|
||||
conn->connect_sin.ss_family == AF_INET ?
|
||||
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6),
|
||||
0);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1226,6 +1284,13 @@ static bool empty_accepted_list(struct scoutfs_net_connection *conn)
|
||||
return empty;
|
||||
}
|
||||
|
||||
/*
|
||||
* sockaddr_storage wraps both _in and _in6, which have _port always
|
||||
* __be16 at the same offset, and we only need to test whether it's
|
||||
* zero.
|
||||
*/
|
||||
#define sockaddr_port_is_nonzero(sin) ((sin).__data[0] || (sin).__data[1])
|
||||
|
||||
/*
|
||||
* Safely shut down an active connection. This can be triggered by
|
||||
* errors in workers or by an external call to free the connection. The
|
||||
@@ -1249,7 +1314,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
trace_scoutfs_conn_shutdown_start(conn);
|
||||
|
||||
/* connected and accepted conns print a message */
|
||||
if (conn->peername.sin_port != 0)
|
||||
if (sockaddr_port_is_nonzero(conn->peername))
|
||||
scoutfs_info(sb, "%s "SIN_FMT" -> "SIN_FMT,
|
||||
conn->listening_conn ? "server closing" :
|
||||
"client disconnected",
|
||||
@@ -1303,7 +1368,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
struct message_send, head))) {
|
||||
resp_func = msend->resp_func;
|
||||
resp_data = msend->resp_data;
|
||||
free_msend(ninf, msend);
|
||||
free_msend(ninf, conn, msend);
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);
|
||||
@@ -1319,7 +1384,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
list_splice_tail_init(&conn->send_queue, &conn->resend_queue);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->resend_queue, head) {
|
||||
if (msend->nh.cmd == SCOUTFS_NET_CMD_GREETING)
|
||||
free_msend(ninf, msend);
|
||||
free_msend(ninf, conn, msend);
|
||||
}
|
||||
|
||||
clear_conn_fl(conn, saw_greeting);
|
||||
@@ -1379,6 +1444,7 @@ static void scoutfs_net_reconn_free_worker(struct work_struct *work)
|
||||
DEFINE_CONN_FROM_WORK(conn, work, reconn_free_dwork.work);
|
||||
struct super_block *sb = conn->sb;
|
||||
struct scoutfs_net_connection *acc;
|
||||
union scoutfs_inet_addr addr;
|
||||
unsigned long now = jiffies;
|
||||
unsigned long deadline = 0;
|
||||
bool requeue = false;
|
||||
@@ -1399,8 +1465,9 @@ restart:
|
||||
if (!test_conn_fl(conn, shutting_down)) {
|
||||
scoutfs_info(sb, "client "SIN_FMT" reconnect timed out, fencing",
|
||||
SIN_ARG(&acc->last_peername));
|
||||
scoutfs_sin_to_addr(&addr, &acc->last_peername);
|
||||
ret = scoutfs_fence_start(sb, acc->rid,
|
||||
acc->last_peername.sin_addr.s_addr,
|
||||
&addr,
|
||||
SCOUTFS_FENCE_CLIENT_RECONNECT);
|
||||
if (ret) {
|
||||
scoutfs_err(sb, "client fence returned err %d, shutting down server",
|
||||
@@ -1483,9 +1550,9 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
conn->req_funcs = req_funcs;
|
||||
spin_lock_init(&conn->lock);
|
||||
init_waitqueue_head(&conn->waitq);
|
||||
conn->sockname.sin_family = AF_INET;
|
||||
conn->peername.sin_family = AF_INET;
|
||||
conn->last_peername.sin_family = AF_INET;
|
||||
conn->sockname.ss_family = AF_UNSPEC;
|
||||
conn->peername.ss_family = AF_UNSPEC;
|
||||
conn->last_peername.ss_family = AF_UNSPEC;
|
||||
INIT_LIST_HEAD(&conn->accepted_head);
|
||||
INIT_LIST_HEAD(&conn->accepted_list);
|
||||
conn->next_send_seq = 1;
|
||||
@@ -1493,6 +1560,8 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
atomic64_set(&conn->recv_seq, 0);
|
||||
INIT_LIST_HEAD(&conn->send_queue);
|
||||
INIT_LIST_HEAD(&conn->resend_queue);
|
||||
conn->req_root = RB_ROOT;
|
||||
conn->resp_root = RB_ROOT;
|
||||
INIT_WORK(&conn->listen_work, scoutfs_net_listen_worker);
|
||||
INIT_WORK(&conn->connect_work, scoutfs_net_connect_worker);
|
||||
INIT_WORK(&conn->send_work, scoutfs_net_send_worker);
|
||||
@@ -1562,7 +1631,7 @@ void scoutfs_net_free_conn(struct super_block *sb,
|
||||
*/
|
||||
int scoutfs_net_bind(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct sockaddr_in *sin)
|
||||
struct sockaddr_storage *sin)
|
||||
{
|
||||
struct socket *sock = NULL;
|
||||
int addrlen;
|
||||
@@ -1573,7 +1642,7 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(conn->sock))
|
||||
return -EINVAL;
|
||||
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(sin->ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1585,7 +1654,7 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
addrlen = sin->ss_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
|
||||
ret = kernel_bind(sock, (struct sockaddr *)sin, addrlen);
|
||||
if (ret)
|
||||
goto out;
|
||||
@@ -1601,7 +1670,7 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
ret = 0;
|
||||
|
||||
conn->sock = sock;
|
||||
*sin = conn->sockname;
|
||||
sin = (struct sockaddr_storage *)&conn->sockname;
|
||||
|
||||
out:
|
||||
if (ret < 0 && sock)
|
||||
@@ -1636,7 +1705,7 @@ static bool connect_result(struct scoutfs_net_connection *conn, int *error)
|
||||
done = true;
|
||||
*error = 0;
|
||||
} else if (test_conn_fl(conn, shutting_down) ||
|
||||
conn->connect_sin.sin_family == 0) {
|
||||
conn->connect_sin.ss_family == AF_UNSPEC) {
|
||||
done = true;
|
||||
*error = -ESHUTDOWN;
|
||||
}
|
||||
@@ -1657,7 +1726,7 @@ static bool connect_result(struct scoutfs_net_connection *conn, int *error)
|
||||
*/
|
||||
int scoutfs_net_connect(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct sockaddr_in *sin, unsigned long timeout_ms)
|
||||
struct sockaddr_storage *sin, unsigned long timeout_ms)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
@@ -1705,7 +1774,7 @@ void scoutfs_net_client_greeting(struct super_block *sb,
|
||||
atomic64_set(&conn->recv_seq, 0);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->resend_queue, head){
|
||||
if (nh_is_response(&msend->nh))
|
||||
free_msend(ninf, msend);
|
||||
free_msend(ninf, conn, msend);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1808,6 +1877,8 @@ restart:
|
||||
BUG_ON(!list_empty(&reconn->send_queue));
|
||||
/* queued greeting response is racing, can be in send or resend queue */
|
||||
list_splice_tail_init(&reconn->resend_queue, &conn->resend_queue);
|
||||
move_sorted_msends(conn, &conn->req_root, reconn, &reconn->req_root);
|
||||
move_sorted_msends(conn, &conn->resp_root, reconn, &reconn->resp_root);
|
||||
|
||||
/* new conn info is unused, swap, old won't call down */
|
||||
swap(conn->info, reconn->info);
|
||||
|
||||
@@ -49,15 +49,15 @@ struct scoutfs_net_connection {
|
||||
unsigned long flags; /* CONN_FL_* bitmask */
|
||||
unsigned long reconn_deadline;
|
||||
|
||||
struct sockaddr_in connect_sin;
|
||||
struct sockaddr_storage connect_sin;
|
||||
unsigned long connect_timeout_ms;
|
||||
|
||||
struct socket *sock;
|
||||
u64 rid;
|
||||
u64 greeting_id;
|
||||
struct sockaddr_in sockname;
|
||||
struct sockaddr_in peername;
|
||||
struct sockaddr_in last_peername;
|
||||
struct sockaddr_storage sockname;
|
||||
struct sockaddr_storage peername;
|
||||
struct sockaddr_storage last_peername;
|
||||
|
||||
struct list_head accepted_head;
|
||||
struct scoutfs_net_connection *listening_conn;
|
||||
@@ -67,6 +67,8 @@ struct scoutfs_net_connection {
|
||||
u64 next_send_id;
|
||||
struct list_head send_queue;
|
||||
struct list_head resend_queue;
|
||||
struct rb_root req_root;
|
||||
struct rb_root resp_root;
|
||||
|
||||
atomic64_t recv_seq;
|
||||
unsigned int ordered_proc_nr;
|
||||
@@ -97,27 +99,44 @@ enum conn_flags {
|
||||
CONN_FL_reconn_freeing = (1UL << 6), /* waiting done, setter frees */
|
||||
};
|
||||
|
||||
#define SIN_FMT "%pIS:%u"
|
||||
#define SIN_ARG(sin) sin, be16_to_cpu((sin)->sin_port)
|
||||
#define SIN_FMT "%pISpc"
|
||||
#define SIN_ARG(sin) sin
|
||||
|
||||
static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
|
||||
static inline void scoutfs_addr_to_sin(struct sockaddr_storage *sin,
|
||||
union scoutfs_inet_addr *addr)
|
||||
{
|
||||
BUG_ON(addr->v4.family != cpu_to_le16(SCOUTFS_AF_IPV4));
|
||||
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
|
||||
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
|
||||
if (addr->v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
|
||||
memset(sin, 0, sizeof(struct sockaddr_storage));
|
||||
sin4->sin_family = AF_INET;
|
||||
sin4->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
|
||||
sin4->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
|
||||
} else if (addr->v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
|
||||
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
|
||||
memset(sin, 0, sizeof(struct sockaddr_storage));
|
||||
sin6->sin6_family = AF_INET6;
|
||||
memcpy(&sin6->sin6_addr.in6_u.u6_addr8, &addr->v6.addr, 16);
|
||||
sin6->sin6_port = cpu_to_be16(le16_to_cpu(addr->v6.port));
|
||||
} else
|
||||
BUG();
|
||||
}
|
||||
|
||||
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_in *sin)
|
||||
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_storage *sin)
|
||||
{
|
||||
BUG_ON(sin->sin_family != AF_INET);
|
||||
|
||||
memset(addr, 0, sizeof(union scoutfs_inet_addr));
|
||||
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
addr->v4.addr = be32_to_le32(sin->sin_addr.s_addr);
|
||||
addr->v4.port = be16_to_le16(sin->sin_port);
|
||||
if (sin->ss_family == AF_INET) {
|
||||
struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
|
||||
memset(addr, 0, sizeof(union scoutfs_inet_addr));
|
||||
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
addr->v4.addr = be32_to_le32(sin4->sin_addr.s_addr);
|
||||
addr->v4.port = be16_to_le16(sin4->sin_port);
|
||||
} else if (sin->ss_family == AF_INET6) {
|
||||
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
|
||||
memset(addr, 0, sizeof(union scoutfs_inet_addr));
|
||||
addr->v6.family = cpu_to_le16(SCOUTFS_AF_IPV6);
|
||||
memcpy(&addr->v6.addr, &sin6->sin6_addr.in6_u.u6_addr8, 16);
|
||||
addr->v6.port = be16_to_le16(sin6->sin6_port);
|
||||
} else
|
||||
BUG();
|
||||
}
|
||||
|
||||
struct scoutfs_net_connection *
|
||||
@@ -128,10 +147,10 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
u64 scoutfs_net_client_rid(struct scoutfs_net_connection *conn);
|
||||
int scoutfs_net_connect(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct sockaddr_in *sin, unsigned long timeout_ms);
|
||||
struct sockaddr_storage *sin, unsigned long timeout_ms);
|
||||
int scoutfs_net_bind(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct sockaddr_in *sin);
|
||||
struct sockaddr_storage *sin);
|
||||
void scoutfs_net_listen(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn);
|
||||
int scoutfs_net_submit_request(struct super_block *sb,
|
||||
|
||||
@@ -33,6 +33,8 @@ enum {
|
||||
Opt_acl,
|
||||
Opt_data_prealloc_blocks,
|
||||
Opt_data_prealloc_contig_only,
|
||||
Opt_ino_alloc_per_lock,
|
||||
Opt_lock_idle_count,
|
||||
Opt_log_merge_wait_timeout_ms,
|
||||
Opt_metadev_path,
|
||||
Opt_noacl,
|
||||
@@ -47,6 +49,8 @@ static const match_table_t tokens = {
|
||||
{Opt_acl, "acl"},
|
||||
{Opt_data_prealloc_blocks, "data_prealloc_blocks=%s"},
|
||||
{Opt_data_prealloc_contig_only, "data_prealloc_contig_only=%s"},
|
||||
{Opt_ino_alloc_per_lock, "ino_alloc_per_lock=%s"},
|
||||
{Opt_lock_idle_count, "lock_idle_count=%s"},
|
||||
{Opt_log_merge_wait_timeout_ms, "log_merge_wait_timeout_ms=%s"},
|
||||
{Opt_metadev_path, "metadev_path=%s"},
|
||||
{Opt_noacl, "noacl"},
|
||||
@@ -117,6 +121,10 @@ static void free_options(struct scoutfs_mount_options *opts)
|
||||
kfree(opts->metadev_path);
|
||||
}
|
||||
|
||||
#define MIN_LOCK_IDLE_COUNT 32
|
||||
#define DEFAULT_LOCK_IDLE_COUNT (10 * 1000)
|
||||
#define MAX_LOCK_IDLE_COUNT (100 * 1000)
|
||||
|
||||
#define MIN_LOG_MERGE_WAIT_TIMEOUT_MS 100UL
|
||||
#define DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS 500
|
||||
#define MAX_LOG_MERGE_WAIT_TIMEOUT_MS (60 * MSEC_PER_SEC)
|
||||
@@ -136,6 +144,8 @@ static void init_default_options(struct scoutfs_mount_options *opts)
|
||||
|
||||
opts->data_prealloc_blocks = SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS;
|
||||
opts->data_prealloc_contig_only = 1;
|
||||
opts->ino_alloc_per_lock = SCOUTFS_LOCK_INODE_GROUP_NR;
|
||||
opts->lock_idle_count = DEFAULT_LOCK_IDLE_COUNT;
|
||||
opts->log_merge_wait_timeout_ms = DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS;
|
||||
opts->orphan_scan_delay_ms = -1;
|
||||
opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
|
||||
@@ -143,6 +153,21 @@ static void init_default_options(struct scoutfs_mount_options *opts)
|
||||
opts->tcp_keepalive_timeout_ms = DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS;
|
||||
}
|
||||
|
||||
static int verify_lock_idle_count(struct super_block *sb, int ret, int val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse lock_idle_count value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val < MIN_LOCK_IDLE_COUNT || val > MAX_LOCK_IDLE_COUNT) {
|
||||
scoutfs_err(sb, "invalid lock_idle_count value %d, must be between %u and %u",
|
||||
val, MIN_LOCK_IDLE_COUNT, MAX_LOCK_IDLE_COUNT);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
@@ -238,6 +263,18 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
|
||||
opts->data_prealloc_contig_only = nr;
|
||||
break;
|
||||
|
||||
case Opt_ino_alloc_per_lock:
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 1 || nr > SCOUTFS_LOCK_INODE_GROUP_NR) {
|
||||
scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
|
||||
SCOUTFS_LOCK_INODE_GROUP_NR);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->ino_alloc_per_lock = nr;
|
||||
break;
|
||||
|
||||
case Opt_tcp_keepalive_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_tcp_keepalive_timeout_ms(sb, ret, nr);
|
||||
@@ -246,6 +283,14 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
|
||||
opts->tcp_keepalive_timeout_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_lock_idle_count:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_lock_idle_count(sb, ret, nr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->lock_idle_count = nr;
|
||||
break;
|
||||
|
||||
case Opt_log_merge_wait_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
|
||||
@@ -393,6 +438,7 @@ int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
|
||||
seq_puts(seq, ",acl");
|
||||
seq_printf(seq, ",data_prealloc_blocks=%llu", opts.data_prealloc_blocks);
|
||||
seq_printf(seq, ",data_prealloc_contig_only=%u", opts.data_prealloc_contig_only);
|
||||
seq_printf(seq, ",ino_alloc_per_lock=%u", opts.ino_alloc_per_lock);
|
||||
seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
|
||||
if (!is_acl)
|
||||
seq_puts(seq, ",noacl");
|
||||
@@ -481,6 +527,82 @@ static ssize_t data_prealloc_contig_only_store(struct kobject *kobj, struct kobj
|
||||
}
|
||||
SCOUTFS_ATTR_RW(data_prealloc_contig_only);
|
||||
|
||||
static ssize_t ino_alloc_per_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.ino_alloc_per_lock);
|
||||
}
|
||||
static ssize_t ino_alloc_per_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[20]; /* more than enough for octal -U32_MAX */
|
||||
long val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtol(nullterm, 0, &val);
|
||||
if (ret < 0 || val < 1 || val > SCOUTFS_LOCK_INODE_GROUP_NR) {
|
||||
scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
|
||||
SCOUTFS_LOCK_INODE_GROUP_NR);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.ino_alloc_per_lock = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(ino_alloc_per_lock);
|
||||
|
||||
static ssize_t lock_idle_count_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.lock_idle_count);
|
||||
}
|
||||
static ssize_t lock_idle_count_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
int val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoint(nullterm, 0, &val);
|
||||
ret = verify_lock_idle_count(sb, ret, val);
|
||||
if (ret == 0) {
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.lock_idle_count = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
ret = count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(lock_idle_count);
|
||||
|
||||
static ssize_t log_merge_wait_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
@@ -621,6 +743,8 @@ SCOUTFS_ATTR_RO(quorum_slot_nr);
|
||||
static struct attribute *options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_blocks),
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_contig_only),
|
||||
SCOUTFS_ATTR_PTR(ino_alloc_per_lock),
|
||||
SCOUTFS_ATTR_PTR(lock_idle_count),
|
||||
SCOUTFS_ATTR_PTR(log_merge_wait_timeout_ms),
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
struct scoutfs_mount_options {
|
||||
u64 data_prealloc_blocks;
|
||||
bool data_prealloc_contig_only;
|
||||
unsigned int ino_alloc_per_lock;
|
||||
int lock_idle_count;
|
||||
unsigned int log_merge_wait_timeout_ms;
|
||||
char *metadev_path;
|
||||
unsigned int orphan_scan_delay_ms;
|
||||
|
||||
@@ -145,14 +145,26 @@ struct quorum_info {
|
||||
#define DECLARE_QUORUM_INFO_KOBJ(kobj, name) \
|
||||
DECLARE_QUORUM_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
|
||||
|
||||
static bool quorum_slot_present(struct scoutfs_quorum_config *qconf, int i)
|
||||
static bool quorum_slot_ipv4(struct scoutfs_quorum_config *qconf, int i)
|
||||
{
|
||||
BUG_ON(i < 0 || i > SCOUTFS_QUORUM_MAX_SLOTS);
|
||||
|
||||
return qconf->slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
}
|
||||
|
||||
static void quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_in *sin)
|
||||
static bool quorum_slot_ipv6(struct scoutfs_quorum_config *qconf, int i)
|
||||
{
|
||||
BUG_ON(i < 0 || i > SCOUTFS_QUORUM_MAX_SLOTS);
|
||||
|
||||
return qconf->slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6);
|
||||
}
|
||||
|
||||
static bool quorum_slot_present(struct scoutfs_quorum_config *qconf, int i)
|
||||
{
|
||||
return quorum_slot_ipv4(qconf, i) || quorum_slot_ipv6(qconf, i);
|
||||
}
|
||||
|
||||
static void quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_storage *sin)
|
||||
{
|
||||
BUG_ON(i < 0 || i >= SCOUTFS_QUORUM_MAX_SLOTS);
|
||||
|
||||
@@ -179,11 +191,18 @@ static int create_socket(struct super_block *sb)
|
||||
{
|
||||
DECLARE_QUORUM_INFO(sb, qinf);
|
||||
struct socket *sock = NULL;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage sin;
|
||||
struct scoutfs_quorum_slot slot = qinf->qconf.slots[qinf->our_quorum_slot_nr];
|
||||
int addrlen;
|
||||
int ret;
|
||||
|
||||
ret = kc_sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
|
||||
if (le16_to_cpu(slot.addr.v4.family) == SCOUTFS_AF_IPV4)
|
||||
ret = kc_sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
|
||||
else if (le16_to_cpu(slot.addr.v6.family) == SCOUTFS_AF_IPV6)
|
||||
ret = kc_sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
|
||||
else
|
||||
BUG();
|
||||
|
||||
if (ret) {
|
||||
scoutfs_err(sb, "quorum couldn't create udp socket: %d", ret);
|
||||
goto out;
|
||||
@@ -192,9 +211,9 @@ static int create_socket(struct super_block *sb)
|
||||
/* rather fail and retry than block waiting for free */
|
||||
sock->sk->sk_allocation = GFP_ATOMIC;
|
||||
|
||||
addrlen = (le16_to_cpu(slot.addr.v4.family) == SCOUTFS_AF_IPV4) ?
|
||||
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
|
||||
quorum_slot_sin(&qinf->qconf, qinf->our_quorum_slot_nr, &sin);
|
||||
|
||||
addrlen = sizeof(sin);
|
||||
ret = kernel_bind(sock, (struct sockaddr *)&sin, addrlen);
|
||||
if (ret) {
|
||||
scoutfs_err(sb, "quorum failed to bind udp socket to "SIN_FMT": %d",
|
||||
@@ -241,7 +260,7 @@ static int send_msg_members(struct super_block *sb, int type, u64 term, int only
|
||||
.iov_base = &qmes,
|
||||
.iov_len = sizeof(qmes),
|
||||
};
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage sin;
|
||||
struct msghdr mh = {
|
||||
.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
|
||||
.msg_name = &sin,
|
||||
@@ -542,10 +561,11 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
|
||||
u64 term)
|
||||
{
|
||||
#define NR_OLD 2
|
||||
struct scoutfs_quorum_block_event old[SCOUTFS_QUORUM_MAX_SLOTS][NR_OLD] = {{{0,}}};
|
||||
struct scoutfs_quorum_block_event (*old)[NR_OLD];
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_quorum_block blk;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage sin;
|
||||
union scoutfs_inet_addr addr;
|
||||
const __le64 lefsid = cpu_to_le64(sbi->fsid);
|
||||
const u64 rid = sbi->rid;
|
||||
bool fence_started = false;
|
||||
@@ -558,13 +578,20 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
|
||||
|
||||
BUILD_BUG_ON(SCOUTFS_QUORUM_BLOCKS < SCOUTFS_QUORUM_MAX_SLOTS);
|
||||
|
||||
old = kmalloc(NR_OLD * SCOUTFS_QUORUM_MAX_SLOTS * sizeof(struct scoutfs_quorum_block_event), GFP_KERNEL);
|
||||
if (!old) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
memset(old, 0, NR_OLD * SCOUTFS_QUORUM_MAX_SLOTS * sizeof(struct scoutfs_quorum_block_event));
|
||||
|
||||
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
|
||||
if (!quorum_slot_present(qconf, i))
|
||||
continue;
|
||||
|
||||
ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + i, &blk, false);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
goto out_free;
|
||||
|
||||
/* elected leader still running */
|
||||
if (le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_ELECT].term) >
|
||||
@@ -598,14 +625,17 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
|
||||
scoutfs_info(sb, "fencing previous leader "SCSBF" at term %llu in slot %u with address "SIN_FMT,
|
||||
SCSB_LEFR_ARGS(lefsid, fence_rid),
|
||||
le64_to_cpu(old[i][j].term), i, SIN_ARG(&sin));
|
||||
ret = scoutfs_fence_start(sb, le64_to_cpu(fence_rid), sin.sin_addr.s_addr,
|
||||
scoutfs_sin_to_addr(&addr, &sin);
|
||||
ret = scoutfs_fence_start(sb, le64_to_cpu(fence_rid), &addr,
|
||||
SCOUTFS_FENCE_QUORUM_BLOCK_LEADER);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
goto out_free;
|
||||
fence_started = true;
|
||||
}
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(old);
|
||||
out:
|
||||
err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
|
||||
if (ret == 0)
|
||||
@@ -708,7 +738,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)
|
||||
struct quorum_info *qinf = container_of(work, struct quorum_info, work);
|
||||
struct scoutfs_mount_options opts;
|
||||
struct super_block *sb = qinf->sb;
|
||||
struct sockaddr_in unused;
|
||||
struct sockaddr_storage unused;
|
||||
struct quorum_host_msg msg;
|
||||
struct quorum_status qst = {0,};
|
||||
struct hb_recording hbr;
|
||||
@@ -990,7 +1020,7 @@ out:
|
||||
* leader with the greatest elected term. If we get it wrong the
|
||||
* connection will timeout and the client will try again.
|
||||
*/
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin)
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_storage *sin)
|
||||
{
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct scoutfs_quorum_block blk;
|
||||
@@ -1049,7 +1079,7 @@ u8 scoutfs_quorum_votes_needed(struct super_block *sb)
|
||||
return qinf->votes_needed;
|
||||
}
|
||||
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_in *sin)
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_storage *sin)
|
||||
{
|
||||
return quorum_slot_sin(qconf, i, sin);
|
||||
}
|
||||
@@ -1208,8 +1238,12 @@ static int verify_quorum_slots(struct super_block *sb, struct quorum_info *qinf,
|
||||
struct scoutfs_quorum_config *qconf)
|
||||
{
|
||||
char slots[(SCOUTFS_QUORUM_MAX_SLOTS * 3) + 1];
|
||||
struct sockaddr_in other;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage other;
|
||||
struct sockaddr_storage sin;
|
||||
struct sockaddr_in *sin4;
|
||||
struct sockaddr_in *other4;
|
||||
struct sockaddr_in6 *sin6;
|
||||
struct sockaddr_in6 *other6;
|
||||
int found = 0;
|
||||
int ret;
|
||||
int i;
|
||||
@@ -1220,35 +1254,78 @@ static int verify_quorum_slots(struct super_block *sb, struct quorum_info *qinf,
|
||||
if (!quorum_slot_present(qconf, i))
|
||||
continue;
|
||||
|
||||
scoutfs_quorum_slot_sin(qconf, i, &sin);
|
||||
if (quorum_slot_ipv4(qconf, i)) {
|
||||
scoutfs_quorum_slot_sin(qconf, i, &sin);
|
||||
sin4 = (struct sockaddr_in *)&sin;
|
||||
|
||||
if (!valid_ipv4_unicast(sin.sin_addr.s_addr)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 unicast address: "SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!valid_ipv4_port(sin.sin_port)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 port number:"SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (!quorum_slot_present(qconf, j))
|
||||
continue;
|
||||
|
||||
scoutfs_quorum_slot_sin(qconf, j, &other);
|
||||
|
||||
if (sin.sin_addr.s_addr == other.sin_addr.s_addr &&
|
||||
sin.sin_port == other.sin_port) {
|
||||
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
|
||||
i, j, SIN_ARG(&sin));
|
||||
if (!valid_ipv4_unicast(sin4->sin_addr.s_addr)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 unicast address: "SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
found++;
|
||||
if (!valid_ipv4_port(sin4->sin_port)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 port number:"SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (!quorum_slot_ipv4(qconf, j))
|
||||
continue;
|
||||
|
||||
scoutfs_quorum_slot_sin(qconf, j, &other);
|
||||
other4 = (struct sockaddr_in *)&other;
|
||||
|
||||
if (sin4->sin_addr.s_addr == other4->sin_addr.s_addr &&
|
||||
sin4->sin_port == other4->sin_port) {
|
||||
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
|
||||
i, j, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
found++;
|
||||
} else if (quorum_slot_ipv6(qconf, i)) {
|
||||
quorum_slot_sin(qconf, i, &sin);
|
||||
sin6 = (struct sockaddr_in6 *)&sin;
|
||||
|
||||
if ((sin6->sin6_addr.in6_u.u6_addr32[0] == 0) && (sin6->sin6_addr.in6_u.u6_addr32[1] == 0) &&
|
||||
(sin6->sin6_addr.in6_u.u6_addr32[2] == 0) && (sin6->sin6_addr.in6_u.u6_addr32[3] == 0)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has unspecified ipv6 address:"SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sin6->sin6_addr.in6_u.u6_addr8[0] == 0xff) {
|
||||
scoutfs_err(sb, "quorum slot #%d has multicast ipv6 address:"SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!valid_ipv4_port(sin6->sin6_port)) {
|
||||
scoutfs_err(sb, "quorum slot #%d has invalid ipv6 port number:"SIN_FMT,
|
||||
i, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (!quorum_slot_ipv6(qconf, j))
|
||||
continue;
|
||||
|
||||
quorum_slot_sin(qconf, j, &other);
|
||||
other6 = (struct sockaddr_in6 *)&other;
|
||||
|
||||
if ((ipv6_addr_equal(&sin6->sin6_addr, &other6->sin6_addr)) &&
|
||||
(sin6->sin6_port == other6->sin6_port)) {
|
||||
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
|
||||
i, j, SIN_ARG(&sin));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
found++;
|
||||
}
|
||||
}
|
||||
|
||||
if (found == 0) {
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
#ifndef _SCOUTFS_QUORUM_H_
|
||||
#define _SCOUTFS_QUORUM_H_
|
||||
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_storage *sin);
|
||||
|
||||
u8 scoutfs_quorum_votes_needed(struct super_block *sb);
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i,
|
||||
struct sockaddr_in *sin);
|
||||
struct sockaddr_storage *sin);
|
||||
|
||||
int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_config *qconf,
|
||||
u64 term);
|
||||
|
||||
@@ -789,6 +789,80 @@ TRACE_EVENT(scoutfs_inode_walk_writeback,
|
||||
__entry->ino, __entry->write, __entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_orphan_scan_start,
|
||||
TP_PROTO(struct super_block *sb),
|
||||
|
||||
TP_ARGS(sb),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
),
|
||||
|
||||
TP_printk(SCSBF, SCSB_TRACE_ARGS)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_orphan_scan_stop,
|
||||
TP_PROTO(struct super_block *sb, bool work_todo),
|
||||
|
||||
TP_ARGS(sb, work_todo),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(bool, work_todo)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->work_todo = work_todo;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" work_todo %d", SCSB_TRACE_ARGS, __entry->work_todo)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_orphan_scan_work,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino),
|
||||
|
||||
TP_ARGS(sb, ino),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu", SCSB_TRACE_ARGS,
|
||||
__entry->ino)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_orphan_scan_end,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, int ret),
|
||||
|
||||
TP_ARGS(sb, ino, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu ret %d", SCSB_TRACE_ARGS,
|
||||
__entry->ino, __entry->ret)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_lock_info_class,
|
||||
TP_PROTO(struct super_block *sb, struct lock_info *linfo),
|
||||
|
||||
@@ -1036,6 +1110,82 @@ TRACE_EVENT(scoutfs_orphan_inode,
|
||||
MINOR(__entry->dev), __entry->ino)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_try_delete_class,
|
||||
TP_PROTO(struct super_block *sb, u64 ino),
|
||||
TP_ARGS(sb, ino),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
),
|
||||
TP_printk(SCSBF" ino %llu", SCSB_TRACE_ARGS, __entry->ino)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete,
|
||||
TP_PROTO(struct super_block *sb, u64 ino),
|
||||
TP_ARGS(sb, ino)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_local_busy,
|
||||
TP_PROTO(struct super_block *sb, u64 ino),
|
||||
TP_ARGS(sb, ino)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_cached,
|
||||
TP_PROTO(struct super_block *sb, u64 ino),
|
||||
TP_ARGS(sb, ino)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_try_delete_class, scoutfs_try_delete_no_item,
|
||||
TP_PROTO(struct super_block *sb, u64 ino),
|
||||
TP_ARGS(sb, ino)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_try_delete_has_links,
|
||||
TP_PROTO(struct super_block *sb, u64 ino, unsigned int nlink),
|
||||
|
||||
TP_ARGS(sb, ino, nlink),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(unsigned int, nlink)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->nlink = nlink;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu nlink %u", SCSB_TRACE_ARGS, __entry->ino,
|
||||
__entry->nlink)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_inode_orphan_delete,
|
||||
TP_PROTO(struct super_block *sb, u64 ino, int ret),
|
||||
|
||||
TP_ARGS(sb, ino, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu ret %d", SCSB_TRACE_ARGS, __entry->ino,
|
||||
__entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_delete_inode,
|
||||
TP_PROTO(struct super_block *sb, u64 ino, umode_t mode, u64 size),
|
||||
|
||||
@@ -1060,6 +1210,32 @@ TRACE_EVENT(scoutfs_delete_inode,
|
||||
__entry->mode, __entry->size)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_delete_inode_end,
|
||||
TP_PROTO(struct super_block *sb, u64 ino, umode_t mode, u64 size, int ret),
|
||||
|
||||
TP_ARGS(sb, ino, mode, size, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(dev_t, dev)
|
||||
__field(__u64, ino)
|
||||
__field(umode_t, mode)
|
||||
__field(__u64, size)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = sb->s_dev;
|
||||
__entry->ino = ino;
|
||||
__entry->mode = mode;
|
||||
__entry->size = size;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk("dev %d,%d ino %llu, mode 0x%x size %llu, ret %d",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
|
||||
__entry->mode, __entry->size, __entry->ret)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_key_class,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *key),
|
||||
TP_ARGS(sb, key),
|
||||
@@ -1100,7 +1276,6 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
__field(unsigned char, invalidate_pending)
|
||||
__field(int, mode)
|
||||
__field(int, invalidating_mode)
|
||||
__field(unsigned int, refcount)
|
||||
__field(unsigned int, waiters_cw)
|
||||
__field(unsigned int, waiters_pr)
|
||||
__field(unsigned int, waiters_ex)
|
||||
@@ -1119,7 +1294,6 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
__entry->invalidate_pending = lck->invalidate_pending;
|
||||
__entry->mode = lck->mode;
|
||||
__entry->invalidating_mode = lck->invalidating_mode;
|
||||
__entry->refcount = atomic_read(&lck->refcount);
|
||||
__entry->waiters_pr = lck->waiters[SCOUTFS_LOCK_READ];
|
||||
__entry->waiters_ex = lck->waiters[SCOUTFS_LOCK_WRITE];
|
||||
__entry->waiters_cw = lck->waiters[SCOUTFS_LOCK_WRITE_ONLY];
|
||||
@@ -1127,11 +1301,11 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
__entry->users_ex = lck->users[SCOUTFS_LOCK_WRITE];
|
||||
__entry->users_cw = lck->users[SCOUTFS_LOCK_WRITE_ONLY];
|
||||
),
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u invmd %u reqp %u invp %u refg %llu rfcnt %d wris %llu dts %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u invmd %u reqp %u invp %u refg %llu wris %llu dts %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
|
||||
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
|
||||
__entry->mode, __entry->invalidating_mode, __entry->request_pending,
|
||||
__entry->invalidate_pending, __entry->refresh_gen, __entry->refcount,
|
||||
__entry->write_seq, __entry->dirty_trans_seq,
|
||||
__entry->invalidate_pending, __entry->refresh_gen, __entry->write_seq,
|
||||
__entry->dirty_trans_seq,
|
||||
__entry->waiters_pr, __entry->waiters_ex, __entry->waiters_cw,
|
||||
__entry->users_pr, __entry->users_ex, __entry->users_cw)
|
||||
);
|
||||
@@ -1181,35 +1355,37 @@ DEFINE_EVENT(scoutfs_lock_class, scoutfs_lock_shrink,
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_net_class,
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
|
||||
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
|
||||
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
|
||||
TP_ARGS(sb, name, peer, nh),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
si4_trace_define(name)
|
||||
si4_trace_define(peer)
|
||||
__field_struct(struct sockaddr_storage, name)
|
||||
__field_struct(struct sockaddr_storage, peer)
|
||||
snh_trace_define(nh)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
si4_trace_assign(name, name);
|
||||
si4_trace_assign(peer, peer);
|
||||
memcpy(&__entry->name, name, sizeof(struct sockaddr_storage));
|
||||
memcpy(&__entry->peer, peer, sizeof(struct sockaddr_storage));
|
||||
snh_trace_assign(nh, nh);
|
||||
),
|
||||
TP_printk(SCSBF" name "SI4_FMT" peer "SI4_FMT" nh "SNH_FMT,
|
||||
SCSB_TRACE_ARGS, si4_trace_args(name), si4_trace_args(peer),
|
||||
TP_printk(SCSBF" name "SIN_FMT" peer "SIN_FMT" nh "SNH_FMT,
|
||||
SCSB_TRACE_ARGS,
|
||||
&__entry->name,
|
||||
&__entry->peer,
|
||||
snh_trace_args(nh))
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_net_class, scoutfs_net_send_message,
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
|
||||
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
|
||||
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
|
||||
TP_ARGS(sb, name, peer, nh)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_net_class, scoutfs_net_recv_message,
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
|
||||
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
|
||||
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
|
||||
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
|
||||
TP_ARGS(sb, name, peer, nh)
|
||||
);
|
||||
|
||||
@@ -1242,8 +1418,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
|
||||
__field(void *, sock)
|
||||
__field(__u64, c_rid)
|
||||
__field(__u64, greeting_id)
|
||||
si4_trace_define(sockname)
|
||||
si4_trace_define(peername)
|
||||
__field_struct(struct sockaddr_storage, sockname)
|
||||
__field_struct(struct sockaddr_storage, peername)
|
||||
__field(unsigned char, e_accepted_head)
|
||||
__field(void *, listening_conn)
|
||||
__field(unsigned char, e_accepted_list)
|
||||
@@ -1261,8 +1437,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
|
||||
__entry->sock = conn->sock;
|
||||
__entry->c_rid = conn->rid;
|
||||
__entry->greeting_id = conn->greeting_id;
|
||||
si4_trace_assign(sockname, &conn->sockname);
|
||||
si4_trace_assign(peername, &conn->peername);
|
||||
memcpy(&__entry->sockname, &conn->sockname, sizeof(struct sockaddr_storage));
|
||||
memcpy(&__entry->peername, &conn->peername, sizeof(struct sockaddr_storage));
|
||||
__entry->e_accepted_head = !!list_empty(&conn->accepted_head);
|
||||
__entry->listening_conn = conn->listening_conn;
|
||||
__entry->e_accepted_list = !!list_empty(&conn->accepted_list);
|
||||
@@ -1272,7 +1448,7 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
|
||||
__entry->e_resend_queue = !!list_empty(&conn->resend_queue);
|
||||
__entry->recv_seq = atomic64_read(&conn->recv_seq);
|
||||
),
|
||||
TP_printk(SCSBF" flags %s rc_dl %lu cto %lu sk %p rid %llu grid %llu sn "SI4_FMT" pn "SI4_FMT" eah %u lc %p eal %u nss %llu nsi %llu esq %u erq %u rs %llu",
|
||||
TP_printk(SCSBF" flags %s rc_dl %lu cto %lu sk %p rid %llu grid %llu sn "SIN_FMT" pn "SIN_FMT" eah %u lc %p eal %u nss %llu nsi %llu esq %u erq %u rs %llu",
|
||||
SCSB_TRACE_ARGS,
|
||||
print_conn_flags(__entry->flags),
|
||||
__entry->reconn_deadline,
|
||||
@@ -1280,8 +1456,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
|
||||
__entry->sock,
|
||||
__entry->c_rid,
|
||||
__entry->greeting_id,
|
||||
si4_trace_args(sockname),
|
||||
si4_trace_args(peername),
|
||||
&__entry->sockname,
|
||||
&__entry->peername,
|
||||
__entry->e_accepted_head,
|
||||
__entry->listening_conn,
|
||||
__entry->e_accepted_list,
|
||||
@@ -1445,28 +1621,6 @@ DEFINE_EVENT(scoutfs_work_class, scoutfs_data_return_server_extents_exit,
|
||||
TP_ARGS(sb, data, ret)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_shrink_exit_class,
|
||||
TP_PROTO(struct super_block *sb, unsigned long nr_to_scan, int ret),
|
||||
TP_ARGS(sb, nr_to_scan, ret),
|
||||
TP_STRUCT__entry(
|
||||
__field(void *, sb)
|
||||
__field(unsigned long, nr_to_scan)
|
||||
__field(int, ret)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->sb = sb;
|
||||
__entry->nr_to_scan = nr_to_scan;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
TP_printk("sb %p nr_to_scan %lu ret %d",
|
||||
__entry->sb, __entry->nr_to_scan, __entry->ret)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_shrink_exit_class, scoutfs_lock_shrink_exit,
|
||||
TP_PROTO(struct super_block *sb, unsigned long nr_to_scan, int ret),
|
||||
TP_ARGS(sb, nr_to_scan, ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_rename,
|
||||
TP_PROTO(struct super_block *sb, struct inode *old_dir,
|
||||
struct dentry *old_dentry, struct inode *new_dir,
|
||||
@@ -3099,6 +3253,24 @@ TRACE_EVENT(scoutfs_ioc_search_xattrs,
|
||||
__entry->ino, __entry->last_ino)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trigger_fired,
|
||||
TP_PROTO(struct super_block *sb, const char *name),
|
||||
|
||||
TP_ARGS(sb, name),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(const char *, name)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->name = name;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" %s", SCSB_TRACE_ARGS, __entry->name)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_SCOUTFS_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
#include "recov.h"
|
||||
#include "omap.h"
|
||||
#include "fence.h"
|
||||
#include "triggers.h"
|
||||
|
||||
/*
|
||||
* Every active mount can act as the server that listens on a net
|
||||
@@ -994,10 +995,11 @@ static int for_each_rid_last_lt(struct super_block *sb, struct scoutfs_btree_roo
|
||||
}
|
||||
|
||||
/*
|
||||
* Log merge range items are stored at the starting fs key of the range.
|
||||
* The only fs key field that doesn't hold information is the zone, so
|
||||
* we use the zone to differentiate all types that we store in the log
|
||||
* merge tree.
|
||||
* Log merge range items are stored at the starting fs key of the range
|
||||
* with the zone overwritten to indicate the log merge item type. This
|
||||
* day0 mistake loses sorting information for items in the different
|
||||
* zones in the fs root, so the range items aren't strictly sorted by
|
||||
* the starting key of their range.
|
||||
*/
|
||||
static void init_log_merge_key(struct scoutfs_key *key, u8 zone, u64 first,
|
||||
u64 second)
|
||||
@@ -1029,6 +1031,51 @@ static int next_log_merge_item_key(struct super_block *sb, struct scoutfs_btree_
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The range items aren't sorted by their range.start because
|
||||
* _RANGE_ZONE clobbers the range's zone. We sweep all the items and
|
||||
* find the range with the next least starting key that's greater than
|
||||
* the caller's starting key. We have to be careful to iterate over the
|
||||
* log_merge tree keys because the ranges can overlap as they're mapped
|
||||
* to the log_merge keys by clobbering their zone.
|
||||
*/
|
||||
static int next_log_merge_range(struct super_block *sb, struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *start, struct scoutfs_log_merge_range *rng)
|
||||
{
|
||||
struct scoutfs_log_merge_range *next;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
key = *start;
|
||||
key.sk_zone = SCOUTFS_LOG_MERGE_RANGE_ZONE;
|
||||
scoutfs_key_set_ones(&rng->start);
|
||||
|
||||
do {
|
||||
ret = scoutfs_btree_next(sb, root, &key, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.key->sk_zone != SCOUTFS_LOG_MERGE_RANGE_ZONE) {
|
||||
ret = -ENOENT;
|
||||
} else if (iref.val_len != sizeof(struct scoutfs_log_merge_range)) {
|
||||
ret = -EIO;
|
||||
} else {
|
||||
next = iref.val;
|
||||
if (scoutfs_key_compare(&next->start, &rng->start) < 0 &&
|
||||
scoutfs_key_compare(&next->start, start) >= 0)
|
||||
*rng = *next;
|
||||
key = *iref.key;
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
} while (ret == 0);
|
||||
|
||||
if (ret == -ENOENT && !scoutfs_key_is_ones(&rng->start))
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int next_log_merge_item(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
u8 zone, u64 first, u64 second,
|
||||
@@ -1245,9 +1292,13 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
|
||||
* meta was low so that deleted items are merged
|
||||
* promptly and freed blocks can bring the client out of
|
||||
* enospc.
|
||||
*
|
||||
* The trigger can be used to force a log merge in cases where
|
||||
* a test only generates small amounts of change.
|
||||
*/
|
||||
finalize_ours = (lt->item_root.height > 2) ||
|
||||
(le32_to_cpu(lt->meta_avail.flags) & SCOUTFS_ALLOC_FLAG_LOW);
|
||||
(le32_to_cpu(lt->meta_avail.flags) & SCOUTFS_ALLOC_FLAG_LOW) ||
|
||||
scoutfs_trigger(sb, LOG_MERGE_FORCE_FINALIZE_OURS);
|
||||
|
||||
trace_scoutfs_server_finalize_decision(sb, rid, saw_finalized, others_active,
|
||||
ours_visible, finalize_ours, delay_ms,
|
||||
@@ -1356,6 +1407,8 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
|
||||
BUG_ON(err); /* inconsistent */
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, log_merge_start);
|
||||
|
||||
/* we're done, caller can make forward progress */
|
||||
break;
|
||||
}
|
||||
@@ -1572,7 +1625,8 @@ static int server_get_log_trees(struct super_block *sb,
|
||||
goto update;
|
||||
}
|
||||
|
||||
ret = alloc_move_empty(sb, &super->data_alloc, <.data_freed, 100);
|
||||
ret = alloc_move_empty(sb, &super->data_alloc, <.data_freed,
|
||||
COMMIT_HOLD_ALLOC_BUDGET / 2);
|
||||
if (ret == -EINPROGRESS)
|
||||
ret = 0;
|
||||
if (ret < 0) {
|
||||
@@ -1682,6 +1736,7 @@ static int server_commit_log_trees(struct super_block *sb,
|
||||
int ret;
|
||||
|
||||
if (arg_len != sizeof(struct scoutfs_log_trees)) {
|
||||
err_str = "invalid message log_trees size";
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -1745,7 +1800,7 @@ static int server_commit_log_trees(struct super_block *sb,
|
||||
|
||||
ret = scoutfs_btree_update(sb, &server->alloc, &server->wri,
|
||||
&super->logs_root, &key, <, sizeof(lt));
|
||||
BUG_ON(ret < 0); /* dirtying should have guaranteed success */
|
||||
BUG_ON(ret < 0); /* dirtying should have guaranteed success, srch item inconsistent */
|
||||
if (ret < 0)
|
||||
err_str = "updating log trees item";
|
||||
|
||||
@@ -1753,11 +1808,10 @@ unlock:
|
||||
mutex_unlock(&server->logs_mutex);
|
||||
|
||||
ret = server_apply_commit(sb, &hold, ret);
|
||||
out:
|
||||
if (ret < 0)
|
||||
scoutfs_err(sb, "server error %d committing client logs for rid %016llx, nr %llu: %s",
|
||||
ret, rid, le64_to_cpu(lt.nr), err_str);
|
||||
out:
|
||||
WARN_ON_ONCE(ret < 0);
|
||||
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
|
||||
}
|
||||
|
||||
@@ -1867,9 +1921,11 @@ static int reclaim_open_log_tree(struct super_block *sb, u64 rid)
|
||||
scoutfs_alloc_splice_list(sb, &server->alloc, &server->wri, server->other_freed,
|
||||
<.meta_avail)) ?:
|
||||
(err_str = "empty data_avail",
|
||||
alloc_move_empty(sb, &super->data_alloc, <.data_avail, 100)) ?:
|
||||
alloc_move_empty(sb, &super->data_alloc, <.data_avail,
|
||||
COMMIT_HOLD_ALLOC_BUDGET / 2)) ?:
|
||||
(err_str = "empty data_freed",
|
||||
alloc_move_empty(sb, &super->data_alloc, <.data_freed, 100));
|
||||
alloc_move_empty(sb, &super->data_alloc, <.data_freed,
|
||||
COMMIT_HOLD_ALLOC_BUDGET / 2));
|
||||
mutex_unlock(&server->alloc_mutex);
|
||||
|
||||
/* only finalize, allowing merging, once the allocators are fully freed */
|
||||
@@ -2094,7 +2150,7 @@ static int server_srch_get_compact(struct super_block *sb,
|
||||
|
||||
apply:
|
||||
ret = server_apply_commit(sb, &hold, ret);
|
||||
WARN_ON_ONCE(ret < 0 && ret != -ENOENT); /* XXX leaked busy item */
|
||||
WARN_ON_ONCE(ret < 0 && ret != -ENOENT && ret != -ENOLINK); /* XXX leaked busy item */
|
||||
out:
|
||||
ret = scoutfs_net_response(sb, conn, cmd, id, ret,
|
||||
sc, sizeof(struct scoutfs_srch_compact));
|
||||
@@ -2460,6 +2516,8 @@ static int splice_log_merge_completions(struct super_block *sb,
|
||||
queue_work(server->wq, &server->log_merge_free_work);
|
||||
else
|
||||
err_str = "deleting merge status item";
|
||||
|
||||
scoutfs_inc_counter(sb, log_merge_complete);
|
||||
out:
|
||||
if (upd_stat) {
|
||||
init_log_merge_key(&key, SCOUTFS_LOG_MERGE_STATUS_ZONE, 0, 0);
|
||||
@@ -2472,10 +2530,9 @@ out:
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
scoutfs_err(sb, "server error %d splicing log merge completion: %s", ret, err_str);
|
||||
|
||||
BUG_ON(ret); /* inconsistent */
|
||||
/* inconsistent */
|
||||
scoutfs_bug_on_err(sb, ret,
|
||||
"server error %d splicing log merge completion: %s", ret, err_str);
|
||||
|
||||
return ret ?: einprogress;
|
||||
}
|
||||
@@ -2720,10 +2777,7 @@ restart:
|
||||
|
||||
/* find the next range, always checking for splicing */
|
||||
for (;;) {
|
||||
key = stat.next_range_key;
|
||||
key.sk_zone = SCOUTFS_LOG_MERGE_RANGE_ZONE;
|
||||
ret = next_log_merge_item_key(sb, &super->log_merge, SCOUTFS_LOG_MERGE_RANGE_ZONE,
|
||||
&key, &rng, sizeof(rng));
|
||||
ret = next_log_merge_range(sb, &super->log_merge, &stat.next_range_key, &rng);
|
||||
if (ret < 0 && ret != -ENOENT) {
|
||||
err_str = "finding merge range item";
|
||||
goto out;
|
||||
@@ -2994,7 +3048,13 @@ static int server_commit_log_merge(struct super_block *sb,
|
||||
SCOUTFS_LOG_MERGE_STATUS_ZONE, 0, 0,
|
||||
&stat, sizeof(stat));
|
||||
if (ret < 0) {
|
||||
err_str = "getting merge status item";
|
||||
/*
|
||||
* During a retransmission, it's possible that the server
|
||||
* already committed and resolved this log merge. ENOENT
|
||||
* is expected in that case.
|
||||
*/
|
||||
if (ret != -ENOENT)
|
||||
err_str = "getting merge status item";
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -3493,7 +3553,7 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
|
||||
* it's acceptable to see -EEXIST.
|
||||
*/
|
||||
static int insert_mounted_client(struct super_block *sb, u64 rid, u64 gr_flags,
|
||||
struct sockaddr_in *sin)
|
||||
struct sockaddr_storage *sin)
|
||||
{
|
||||
DECLARE_SERVER_INFO(sb, server);
|
||||
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
|
||||
@@ -4246,7 +4306,7 @@ static void fence_pending_recov_worker(struct work_struct *work)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = scoutfs_fence_start(sb, rid, le32_to_be32(addr.v4.addr),
|
||||
ret = scoutfs_fence_start(sb, rid, &addr,
|
||||
SCOUTFS_FENCE_CLIENT_RECOVERY);
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "fence returned err %d, shutting down server", ret);
|
||||
@@ -4397,7 +4457,7 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
struct scoutfs_net_connection *conn = NULL;
|
||||
struct scoutfs_mount_options opts;
|
||||
DECLARE_WAIT_QUEUE_HEAD(waitq);
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_storage sin;
|
||||
bool alloc_init = false;
|
||||
u64 max_seq;
|
||||
int ret;
|
||||
@@ -4406,7 +4466,7 @@ static void scoutfs_server_worker(struct work_struct *work)
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
scoutfs_quorum_slot_sin(&server->qconf, opts.quorum_slot_nr, &sin);
|
||||
scoutfs_info(sb, "server starting at "SIN_FMT, SIN_ARG(&sin));
|
||||
scoutfs_info(sb, "server starting at "SIN_FMT, &sin);
|
||||
|
||||
scoutfs_block_writer_init(sb, &server->wri);
|
||||
server->finalize_sent_seq = 0;
|
||||
|
||||
@@ -1,27 +1,6 @@
|
||||
#ifndef _SCOUTFS_SERVER_H_
|
||||
#define _SCOUTFS_SERVER_H_
|
||||
|
||||
#define SI4_FMT "%u.%u.%u.%u:%u"
|
||||
|
||||
#define si4_trace_define(name) \
|
||||
__field(__u32, name##_addr) \
|
||||
__field(__u16, name##_port)
|
||||
|
||||
#define si4_trace_assign(name, sin) \
|
||||
do { \
|
||||
__typeof__(sin) _sin = (sin); \
|
||||
\
|
||||
__entry->name##_addr = be32_to_cpu(_sin->sin_addr.s_addr); \
|
||||
__entry->name##_port = be16_to_cpu(_sin->sin_port); \
|
||||
} while(0)
|
||||
|
||||
#define si4_trace_args(name) \
|
||||
(__entry->name##_addr >> 24), \
|
||||
(__entry->name##_addr >> 16) & 255, \
|
||||
(__entry->name##_addr >> 8) & 255, \
|
||||
__entry->name##_addr & 255, \
|
||||
__entry->name##_port
|
||||
|
||||
#define SNH_FMT \
|
||||
"seq %llu recv_seq %llu id %llu data_len %u cmd %u flags 0x%x error %u"
|
||||
#define SNH_ARG(nh) \
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
|
||||
#include "super.h"
|
||||
#include "triggers.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
/*
|
||||
* We have debugfs files we can write to which arm triggers which
|
||||
@@ -39,6 +40,7 @@ struct scoutfs_triggers {
|
||||
|
||||
static char *names[] = {
|
||||
[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
|
||||
[SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS] = "log_merge_force_finalize_ours",
|
||||
[SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
|
||||
[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
|
||||
[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
|
||||
@@ -51,6 +53,7 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
|
||||
atomic_t *atom;
|
||||
int old;
|
||||
int mem;
|
||||
bool fired;
|
||||
|
||||
BUG_ON(t >= SCOUTFS_TRIGGER_NR);
|
||||
atom = &triggers->atomics[t];
|
||||
@@ -64,7 +67,12 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
|
||||
mem = atomic_cmpxchg(atom, old, 0);
|
||||
} while (mem && mem != old);
|
||||
|
||||
return !!mem;
|
||||
fired = !!mem;
|
||||
|
||||
if (fired)
|
||||
trace_scoutfs_trigger_fired(sb, names[t]);
|
||||
|
||||
return fired;
|
||||
}
|
||||
|
||||
int scoutfs_setup_triggers(struct super_block *sb)
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
enum scoutfs_trigger {
|
||||
SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
|
||||
SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS,
|
||||
SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
|
||||
SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
|
||||
SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
|
||||
|
||||
@@ -117,6 +117,7 @@ used during the test.
|
||||
| T\_NR\_MOUNTS | number of mounts | -n | 3 |
|
||||
| T\_O[0-9] | mount options | created per run | -o server\_addr= |
|
||||
| T\_QUORUM | quorum count | -q | 2 |
|
||||
| T\_EXTRA | per-test file dir | revision ctled | tests/extra/t |
|
||||
| T\_TMP | per-test tmp prefix | made for test | results/tmp/t/tmp |
|
||||
| T\_TMPDIR | per-test tmp dir dir | made for test | results/tmp/t |
|
||||
|
||||
|
||||
882
tests/extra/xfstests/expected-results
Normal file
882
tests/extra/xfstests/expected-results
Normal file
@@ -0,0 +1,882 @@
|
||||
Ran:
|
||||
generic/001
|
||||
generic/002
|
||||
generic/004
|
||||
generic/005
|
||||
generic/006
|
||||
generic/007
|
||||
generic/008
|
||||
generic/009
|
||||
generic/011
|
||||
generic/012
|
||||
generic/013
|
||||
generic/014
|
||||
generic/015
|
||||
generic/016
|
||||
generic/018
|
||||
generic/020
|
||||
generic/021
|
||||
generic/022
|
||||
generic/023
|
||||
generic/024
|
||||
generic/025
|
||||
generic/026
|
||||
generic/028
|
||||
generic/029
|
||||
generic/030
|
||||
generic/031
|
||||
generic/032
|
||||
generic/033
|
||||
generic/034
|
||||
generic/035
|
||||
generic/037
|
||||
generic/039
|
||||
generic/040
|
||||
generic/041
|
||||
generic/050
|
||||
generic/052
|
||||
generic/053
|
||||
generic/056
|
||||
generic/057
|
||||
generic/058
|
||||
generic/059
|
||||
generic/060
|
||||
generic/061
|
||||
generic/062
|
||||
generic/063
|
||||
generic/064
|
||||
generic/065
|
||||
generic/066
|
||||
generic/067
|
||||
generic/069
|
||||
generic/070
|
||||
generic/071
|
||||
generic/073
|
||||
generic/076
|
||||
generic/078
|
||||
generic/079
|
||||
generic/080
|
||||
generic/081
|
||||
generic/082
|
||||
generic/084
|
||||
generic/086
|
||||
generic/087
|
||||
generic/088
|
||||
generic/090
|
||||
generic/091
|
||||
generic/092
|
||||
generic/094
|
||||
generic/096
|
||||
generic/097
|
||||
generic/098
|
||||
generic/099
|
||||
generic/101
|
||||
generic/104
|
||||
generic/105
|
||||
generic/106
|
||||
generic/107
|
||||
generic/110
|
||||
generic/111
|
||||
generic/113
|
||||
generic/114
|
||||
generic/115
|
||||
generic/116
|
||||
generic/117
|
||||
generic/118
|
||||
generic/119
|
||||
generic/120
|
||||
generic/121
|
||||
generic/122
|
||||
generic/123
|
||||
generic/124
|
||||
generic/126
|
||||
generic/128
|
||||
generic/129
|
||||
generic/130
|
||||
generic/131
|
||||
generic/134
|
||||
generic/135
|
||||
generic/136
|
||||
generic/138
|
||||
generic/139
|
||||
generic/140
|
||||
generic/141
|
||||
generic/142
|
||||
generic/143
|
||||
generic/144
|
||||
generic/145
|
||||
generic/146
|
||||
generic/147
|
||||
generic/148
|
||||
generic/149
|
||||
generic/150
|
||||
generic/151
|
||||
generic/152
|
||||
generic/153
|
||||
generic/154
|
||||
generic/155
|
||||
generic/156
|
||||
generic/157
|
||||
generic/158
|
||||
generic/159
|
||||
generic/160
|
||||
generic/161
|
||||
generic/162
|
||||
generic/163
|
||||
generic/169
|
||||
generic/171
|
||||
generic/172
|
||||
generic/173
|
||||
generic/174
|
||||
generic/177
|
||||
generic/178
|
||||
generic/179
|
||||
generic/180
|
||||
generic/181
|
||||
generic/182
|
||||
generic/183
|
||||
generic/184
|
||||
generic/185
|
||||
generic/188
|
||||
generic/189
|
||||
generic/190
|
||||
generic/191
|
||||
generic/193
|
||||
generic/194
|
||||
generic/195
|
||||
generic/196
|
||||
generic/197
|
||||
generic/198
|
||||
generic/199
|
||||
generic/200
|
||||
generic/201
|
||||
generic/202
|
||||
generic/203
|
||||
generic/205
|
||||
generic/206
|
||||
generic/207
|
||||
generic/210
|
||||
generic/211
|
||||
generic/212
|
||||
generic/214
|
||||
generic/215
|
||||
generic/216
|
||||
generic/217
|
||||
generic/218
|
||||
generic/219
|
||||
generic/220
|
||||
generic/221
|
||||
generic/222
|
||||
generic/223
|
||||
generic/225
|
||||
generic/227
|
||||
generic/228
|
||||
generic/229
|
||||
generic/230
|
||||
generic/235
|
||||
generic/236
|
||||
generic/237
|
||||
generic/238
|
||||
generic/240
|
||||
generic/244
|
||||
generic/245
|
||||
generic/246
|
||||
generic/247
|
||||
generic/248
|
||||
generic/249
|
||||
generic/250
|
||||
generic/252
|
||||
generic/253
|
||||
generic/254
|
||||
generic/255
|
||||
generic/256
|
||||
generic/257
|
||||
generic/258
|
||||
generic/259
|
||||
generic/260
|
||||
generic/261
|
||||
generic/262
|
||||
generic/263
|
||||
generic/264
|
||||
generic/265
|
||||
generic/266
|
||||
generic/267
|
||||
generic/268
|
||||
generic/271
|
||||
generic/272
|
||||
generic/276
|
||||
generic/277
|
||||
generic/278
|
||||
generic/279
|
||||
generic/281
|
||||
generic/282
|
||||
generic/283
|
||||
generic/284
|
||||
generic/286
|
||||
generic/287
|
||||
generic/288
|
||||
generic/289
|
||||
generic/290
|
||||
generic/291
|
||||
generic/292
|
||||
generic/293
|
||||
generic/294
|
||||
generic/295
|
||||
generic/296
|
||||
generic/301
|
||||
generic/302
|
||||
generic/303
|
||||
generic/304
|
||||
generic/305
|
||||
generic/306
|
||||
generic/307
|
||||
generic/308
|
||||
generic/309
|
||||
generic/312
|
||||
generic/313
|
||||
generic/314
|
||||
generic/315
|
||||
generic/316
|
||||
generic/317
|
||||
generic/319
|
||||
generic/322
|
||||
generic/324
|
||||
generic/325
|
||||
generic/326
|
||||
generic/327
|
||||
generic/328
|
||||
generic/329
|
||||
generic/330
|
||||
generic/331
|
||||
generic/332
|
||||
generic/335
|
||||
generic/336
|
||||
generic/337
|
||||
generic/341
|
||||
generic/342
|
||||
generic/343
|
||||
generic/346
|
||||
generic/348
|
||||
generic/353
|
||||
generic/355
|
||||
generic/358
|
||||
generic/359
|
||||
generic/360
|
||||
generic/361
|
||||
generic/362
|
||||
generic/363
|
||||
generic/364
|
||||
generic/365
|
||||
generic/366
|
||||
generic/367
|
||||
generic/368
|
||||
generic/369
|
||||
generic/370
|
||||
generic/371
|
||||
generic/372
|
||||
generic/373
|
||||
generic/374
|
||||
generic/375
|
||||
generic/376
|
||||
generic/377
|
||||
generic/378
|
||||
generic/379
|
||||
generic/380
|
||||
generic/381
|
||||
generic/382
|
||||
generic/383
|
||||
generic/384
|
||||
generic/385
|
||||
generic/386
|
||||
generic/389
|
||||
generic/391
|
||||
generic/392
|
||||
generic/393
|
||||
generic/394
|
||||
generic/395
|
||||
generic/396
|
||||
generic/397
|
||||
generic/398
|
||||
generic/400
|
||||
generic/401
|
||||
generic/402
|
||||
generic/403
|
||||
generic/404
|
||||
generic/406
|
||||
generic/407
|
||||
generic/408
|
||||
generic/412
|
||||
generic/413
|
||||
generic/414
|
||||
generic/417
|
||||
generic/419
|
||||
generic/420
|
||||
generic/421
|
||||
generic/422
|
||||
generic/424
|
||||
generic/425
|
||||
generic/426
|
||||
generic/427
|
||||
generic/428
|
||||
generic/436
|
||||
generic/437
|
||||
generic/439
|
||||
generic/440
|
||||
generic/443
|
||||
generic/445
|
||||
generic/446
|
||||
generic/448
|
||||
generic/449
|
||||
generic/450
|
||||
generic/451
|
||||
generic/452
|
||||
generic/453
|
||||
generic/454
|
||||
generic/456
|
||||
generic/458
|
||||
generic/460
|
||||
generic/462
|
||||
generic/463
|
||||
generic/465
|
||||
generic/466
|
||||
generic/468
|
||||
generic/469
|
||||
generic/470
|
||||
generic/471
|
||||
generic/474
|
||||
generic/477
|
||||
generic/478
|
||||
generic/479
|
||||
generic/480
|
||||
generic/481
|
||||
generic/483
|
||||
generic/485
|
||||
generic/486
|
||||
generic/487
|
||||
generic/488
|
||||
generic/489
|
||||
generic/490
|
||||
generic/491
|
||||
generic/492
|
||||
generic/498
|
||||
generic/499
|
||||
generic/501
|
||||
generic/502
|
||||
generic/503
|
||||
generic/504
|
||||
generic/505
|
||||
generic/506
|
||||
generic/507
|
||||
generic/508
|
||||
generic/509
|
||||
generic/510
|
||||
generic/511
|
||||
generic/512
|
||||
generic/513
|
||||
generic/514
|
||||
generic/515
|
||||
generic/516
|
||||
generic/517
|
||||
generic/518
|
||||
generic/519
|
||||
generic/520
|
||||
generic/523
|
||||
generic/524
|
||||
generic/525
|
||||
generic/526
|
||||
generic/527
|
||||
generic/528
|
||||
generic/529
|
||||
generic/530
|
||||
generic/531
|
||||
generic/533
|
||||
generic/534
|
||||
generic/535
|
||||
generic/536
|
||||
generic/537
|
||||
generic/538
|
||||
generic/539
|
||||
generic/540
|
||||
generic/541
|
||||
generic/542
|
||||
generic/543
|
||||
generic/544
|
||||
generic/545
|
||||
generic/546
|
||||
generic/547
|
||||
generic/548
|
||||
generic/549
|
||||
generic/550
|
||||
generic/552
|
||||
generic/553
|
||||
generic/555
|
||||
generic/556
|
||||
generic/557
|
||||
generic/566
|
||||
generic/567
|
||||
generic/571
|
||||
generic/572
|
||||
generic/573
|
||||
generic/574
|
||||
generic/575
|
||||
generic/576
|
||||
generic/577
|
||||
generic/578
|
||||
generic/580
|
||||
generic/581
|
||||
generic/582
|
||||
generic/583
|
||||
generic/584
|
||||
generic/586
|
||||
generic/587
|
||||
generic/588
|
||||
generic/591
|
||||
generic/592
|
||||
generic/593
|
||||
generic/594
|
||||
generic/595
|
||||
generic/596
|
||||
generic/597
|
||||
generic/598
|
||||
generic/599
|
||||
generic/600
|
||||
generic/601
|
||||
generic/602
|
||||
generic/603
|
||||
generic/604
|
||||
generic/605
|
||||
generic/606
|
||||
generic/607
|
||||
generic/608
|
||||
generic/609
|
||||
generic/610
|
||||
generic/611
|
||||
generic/612
|
||||
generic/613
|
||||
generic/614
|
||||
generic/618
|
||||
generic/621
|
||||
generic/623
|
||||
generic/624
|
||||
generic/625
|
||||
generic/626
|
||||
generic/628
|
||||
generic/629
|
||||
generic/630
|
||||
generic/632
|
||||
generic/634
|
||||
generic/635
|
||||
generic/637
|
||||
generic/638
|
||||
generic/639
|
||||
generic/640
|
||||
generic/644
|
||||
generic/645
|
||||
generic/646
|
||||
generic/647
|
||||
generic/651
|
||||
generic/652
|
||||
generic/653
|
||||
generic/654
|
||||
generic/655
|
||||
generic/657
|
||||
generic/658
|
||||
generic/659
|
||||
generic/660
|
||||
generic/661
|
||||
generic/662
|
||||
generic/663
|
||||
generic/664
|
||||
generic/665
|
||||
generic/666
|
||||
generic/667
|
||||
generic/668
|
||||
generic/669
|
||||
generic/673
|
||||
generic/674
|
||||
generic/675
|
||||
generic/676
|
||||
generic/677
|
||||
generic/678
|
||||
generic/679
|
||||
generic/680
|
||||
generic/681
|
||||
generic/682
|
||||
generic/683
|
||||
generic/684
|
||||
generic/685
|
||||
generic/686
|
||||
generic/687
|
||||
generic/688
|
||||
generic/689
|
||||
shared/002
|
||||
shared/032
|
||||
Not
|
||||
run:
|
||||
generic/008
|
||||
generic/009
|
||||
generic/012
|
||||
generic/015
|
||||
generic/016
|
||||
generic/018
|
||||
generic/021
|
||||
generic/022
|
||||
generic/025
|
||||
generic/026
|
||||
generic/031
|
||||
generic/033
|
||||
generic/050
|
||||
generic/052
|
||||
generic/058
|
||||
generic/059
|
||||
generic/060
|
||||
generic/061
|
||||
generic/063
|
||||
generic/064
|
||||
generic/078
|
||||
generic/079
|
||||
generic/081
|
||||
generic/082
|
||||
generic/091
|
||||
generic/094
|
||||
generic/096
|
||||
generic/110
|
||||
generic/111
|
||||
generic/113
|
||||
generic/114
|
||||
generic/115
|
||||
generic/116
|
||||
generic/118
|
||||
generic/119
|
||||
generic/121
|
||||
generic/122
|
||||
generic/123
|
||||
generic/128
|
||||
generic/130
|
||||
generic/134
|
||||
generic/135
|
||||
generic/136
|
||||
generic/138
|
||||
generic/139
|
||||
generic/140
|
||||
generic/142
|
||||
generic/143
|
||||
generic/144
|
||||
generic/145
|
||||
generic/146
|
||||
generic/147
|
||||
generic/148
|
||||
generic/149
|
||||
generic/150
|
||||
generic/151
|
||||
generic/152
|
||||
generic/153
|
||||
generic/154
|
||||
generic/155
|
||||
generic/156
|
||||
generic/157
|
||||
generic/158
|
||||
generic/159
|
||||
generic/160
|
||||
generic/161
|
||||
generic/162
|
||||
generic/163
|
||||
generic/171
|
||||
generic/172
|
||||
generic/173
|
||||
generic/174
|
||||
generic/177
|
||||
generic/178
|
||||
generic/179
|
||||
generic/180
|
||||
generic/181
|
||||
generic/182
|
||||
generic/183
|
||||
generic/185
|
||||
generic/188
|
||||
generic/189
|
||||
generic/190
|
||||
generic/191
|
||||
generic/193
|
||||
generic/194
|
||||
generic/195
|
||||
generic/196
|
||||
generic/197
|
||||
generic/198
|
||||
generic/199
|
||||
generic/200
|
||||
generic/201
|
||||
generic/202
|
||||
generic/203
|
||||
generic/205
|
||||
generic/206
|
||||
generic/207
|
||||
generic/210
|
||||
generic/211
|
||||
generic/212
|
||||
generic/214
|
||||
generic/216
|
||||
generic/217
|
||||
generic/218
|
||||
generic/219
|
||||
generic/220
|
||||
generic/222
|
||||
generic/223
|
||||
generic/225
|
||||
generic/227
|
||||
generic/229
|
||||
generic/230
|
||||
generic/235
|
||||
generic/238
|
||||
generic/240
|
||||
generic/244
|
||||
generic/250
|
||||
generic/252
|
||||
generic/253
|
||||
generic/254
|
||||
generic/255
|
||||
generic/256
|
||||
generic/259
|
||||
generic/260
|
||||
generic/261
|
||||
generic/262
|
||||
generic/263
|
||||
generic/264
|
||||
generic/265
|
||||
generic/266
|
||||
generic/267
|
||||
generic/268
|
||||
generic/271
|
||||
generic/272
|
||||
generic/276
|
||||
generic/277
|
||||
generic/278
|
||||
generic/279
|
||||
generic/281
|
||||
generic/282
|
||||
generic/283
|
||||
generic/284
|
||||
generic/287
|
||||
generic/288
|
||||
generic/289
|
||||
generic/290
|
||||
generic/291
|
||||
generic/292
|
||||
generic/293
|
||||
generic/295
|
||||
generic/296
|
||||
generic/301
|
||||
generic/302
|
||||
generic/303
|
||||
generic/304
|
||||
generic/305
|
||||
generic/312
|
||||
generic/314
|
||||
generic/316
|
||||
generic/317
|
||||
generic/324
|
||||
generic/326
|
||||
generic/327
|
||||
generic/328
|
||||
generic/329
|
||||
generic/330
|
||||
generic/331
|
||||
generic/332
|
||||
generic/353
|
||||
generic/355
|
||||
generic/358
|
||||
generic/359
|
||||
generic/361
|
||||
generic/362
|
||||
generic/363
|
||||
generic/364
|
||||
generic/365
|
||||
generic/366
|
||||
generic/367
|
||||
generic/368
|
||||
generic/369
|
||||
generic/370
|
||||
generic/371
|
||||
generic/372
|
||||
generic/373
|
||||
generic/374
|
||||
generic/378
|
||||
generic/379
|
||||
generic/380
|
||||
generic/381
|
||||
generic/382
|
||||
generic/383
|
||||
generic/384
|
||||
generic/385
|
||||
generic/386
|
||||
generic/391
|
||||
generic/392
|
||||
generic/395
|
||||
generic/396
|
||||
generic/397
|
||||
generic/398
|
||||
generic/400
|
||||
generic/402
|
||||
generic/404
|
||||
generic/406
|
||||
generic/407
|
||||
generic/408
|
||||
generic/412
|
||||
generic/413
|
||||
generic/414
|
||||
generic/417
|
||||
generic/419
|
||||
generic/420
|
||||
generic/421
|
||||
generic/422
|
||||
generic/424
|
||||
generic/425
|
||||
generic/427
|
||||
generic/439
|
||||
generic/440
|
||||
generic/446
|
||||
generic/449
|
||||
generic/450
|
||||
generic/451
|
||||
generic/453
|
||||
generic/454
|
||||
generic/456
|
||||
generic/458
|
||||
generic/462
|
||||
generic/463
|
||||
generic/465
|
||||
generic/466
|
||||
generic/468
|
||||
generic/469
|
||||
generic/470
|
||||
generic/471
|
||||
generic/474
|
||||
generic/485
|
||||
generic/487
|
||||
generic/488
|
||||
generic/491
|
||||
generic/492
|
||||
generic/499
|
||||
generic/501
|
||||
generic/503
|
||||
generic/505
|
||||
generic/506
|
||||
generic/507
|
||||
generic/508
|
||||
generic/511
|
||||
generic/513
|
||||
generic/514
|
||||
generic/515
|
||||
generic/516
|
||||
generic/517
|
||||
generic/518
|
||||
generic/519
|
||||
generic/520
|
||||
generic/528
|
||||
generic/530
|
||||
generic/536
|
||||
generic/537
|
||||
generic/538
|
||||
generic/539
|
||||
generic/540
|
||||
generic/541
|
||||
generic/542
|
||||
generic/543
|
||||
generic/544
|
||||
generic/545
|
||||
generic/546
|
||||
generic/548
|
||||
generic/549
|
||||
generic/550
|
||||
generic/552
|
||||
generic/553
|
||||
generic/555
|
||||
generic/556
|
||||
generic/566
|
||||
generic/567
|
||||
generic/572
|
||||
generic/573
|
||||
generic/574
|
||||
generic/575
|
||||
generic/576
|
||||
generic/577
|
||||
generic/578
|
||||
generic/580
|
||||
generic/581
|
||||
generic/582
|
||||
generic/583
|
||||
generic/584
|
||||
generic/586
|
||||
generic/587
|
||||
generic/588
|
||||
generic/591
|
||||
generic/592
|
||||
generic/593
|
||||
generic/594
|
||||
generic/595
|
||||
generic/596
|
||||
generic/597
|
||||
generic/598
|
||||
generic/599
|
||||
generic/600
|
||||
generic/601
|
||||
generic/602
|
||||
generic/603
|
||||
generic/605
|
||||
generic/606
|
||||
generic/607
|
||||
generic/608
|
||||
generic/609
|
||||
generic/610
|
||||
generic/612
|
||||
generic/613
|
||||
generic/621
|
||||
generic/623
|
||||
generic/624
|
||||
generic/625
|
||||
generic/626
|
||||
generic/628
|
||||
generic/629
|
||||
generic/630
|
||||
generic/635
|
||||
generic/644
|
||||
generic/645
|
||||
generic/646
|
||||
generic/647
|
||||
generic/651
|
||||
generic/652
|
||||
generic/653
|
||||
generic/654
|
||||
generic/655
|
||||
generic/657
|
||||
generic/658
|
||||
generic/659
|
||||
generic/660
|
||||
generic/661
|
||||
generic/662
|
||||
generic/663
|
||||
generic/664
|
||||
generic/665
|
||||
generic/666
|
||||
generic/667
|
||||
generic/668
|
||||
generic/669
|
||||
generic/673
|
||||
generic/674
|
||||
generic/675
|
||||
generic/677
|
||||
generic/678
|
||||
generic/679
|
||||
generic/680
|
||||
generic/681
|
||||
generic/682
|
||||
generic/683
|
||||
generic/684
|
||||
generic/685
|
||||
generic/686
|
||||
generic/687
|
||||
generic/688
|
||||
generic/689
|
||||
shared/002
|
||||
shared/032
|
||||
Passed all 512 tests
|
||||
44
tests/extra/xfstests/local.exclude
Normal file
44
tests/extra/xfstests/local.exclude
Normal file
@@ -0,0 +1,44 @@
|
||||
generic/003 # missing atime update in buffered read
|
||||
generic/075 # file content mismatch failures (fds, etc)
|
||||
generic/103 # enospc causes trans commit failures
|
||||
generic/108 # mount fails on failing device?
|
||||
generic/112 # file content mismatch failures (fds, etc)
|
||||
generic/213 # enospc causes trans commit failures
|
||||
generic/318 # can't support user namespaces until v5.11
|
||||
generic/321 # requires selinux enabled for '+' in ls?
|
||||
generic/338 # BUG_ON update inode error handling
|
||||
generic/347 # _dmthin_mount doesn't work?
|
||||
generic/356 # swap
|
||||
generic/357 # swap
|
||||
generic/409 # bind mounts not scripted yet
|
||||
generic/410 # bind mounts not scripted yet
|
||||
generic/411 # bind mounts not scripted yet
|
||||
generic/423 # symlink inode size is strlen() + 1 on scoutfs
|
||||
generic/430 # xfs_io copy_range missing in el7
|
||||
generic/431 # xfs_io copy_range missing in el7
|
||||
generic/432 # xfs_io copy_range missing in el7
|
||||
generic/433 # xfs_io copy_range missing in el7
|
||||
generic/434 # xfs_io copy_range missing in el7
|
||||
generic/441 # dm-mapper
|
||||
generic/444 # el9's posix_acl_update_mode is buggy ?
|
||||
generic/467 # open_by_handle ESTALE
|
||||
generic/472 # swap
|
||||
generic/484 # dm-mapper
|
||||
generic/493 # swap
|
||||
generic/494 # swap
|
||||
generic/495 # swap
|
||||
generic/496 # swap
|
||||
generic/497 # swap
|
||||
generic/532 # xfs_io statx attrib_mask missing in el7
|
||||
generic/554 # swap
|
||||
generic/563 # cgroup+loopdev
|
||||
generic/564 # xfs_io copy_range missing in el7
|
||||
generic/565 # xfs_io copy_range missing in el7
|
||||
generic/568 # falloc not resulting in block count increase
|
||||
generic/569 # swap
|
||||
generic/570 # swap
|
||||
generic/620 # dm-hugedisk
|
||||
generic/633 # id-mapped mounts missing in el7
|
||||
generic/636 # swap
|
||||
generic/641 # swap
|
||||
generic/643 # swap
|
||||
@@ -8,36 +8,33 @@
|
||||
|
||||
echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"
|
||||
|
||||
log() {
|
||||
echo "$@" > /dev/stderr
|
||||
echo_fail() {
|
||||
echo "$@" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo_fail() {
|
||||
echo "$@" > /dev/stderr
|
||||
exit 1
|
||||
# silence error messages
|
||||
quiet_cat()
|
||||
{
|
||||
cat "$@" 2>/dev/null
|
||||
}
|
||||
|
||||
rid="$SCOUTFS_FENCED_REQ_RID"
|
||||
|
||||
shopt -s nullglob
|
||||
for fs in /sys/fs/scoutfs/*; do
|
||||
[ ! -d "$fs" ] && continue
|
||||
fs_rid="$(quiet_cat $fs/rid)"
|
||||
nr="$(quiet_cat $fs/data_device_maj_min)"
|
||||
[ ! -d "$fs" -o "$fs_rid" != "$rid" ] && continue
|
||||
|
||||
fs_rid="$(cat $fs/rid)" || \
|
||||
echo_fail "failed to get rid in $fs"
|
||||
if [ "$fs_rid" != "$rid" ]; then
|
||||
continue
|
||||
mnt=$(findmnt -l -n -t scoutfs -o TARGET -S $nr)
|
||||
[ -z "$mnt" ] && continue
|
||||
|
||||
if ! umount -qf "$mnt"; then
|
||||
if [ -d "$fs" ]; then
|
||||
echo_fail "umount -qf $mnt failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
nr="$(cat $fs/data_device_maj_min)" || \
|
||||
echo_fail "failed to get data device major:minor in $fs"
|
||||
|
||||
mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
|
||||
echo_fail "findmnt -t scoutfs -S $nr failed"
|
||||
for mnt in $mnts; do
|
||||
umount -f "$mnt" || \
|
||||
echo_fail "umout -f $mnt failed"
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
|
||||
|
||||
@@ -64,21 +64,27 @@ t_rc()
|
||||
}
|
||||
|
||||
#
|
||||
# redirect test output back to the output of the invoking script intead
|
||||
# of the compared output.
|
||||
# As run, stdout/err are redirected to a file that will be compared with
|
||||
# the stored expected golden output of the test. This redirects
|
||||
# stdout/err in the script to stdout of the invoking run-test. It's
|
||||
# intended to give visible output of tests without being included in the
|
||||
# golden output.
|
||||
#
|
||||
t_restore_output()
|
||||
# (see the goofy "exec" fd manipulation in the main run-tests as it runs
|
||||
# each test)
|
||||
#
|
||||
t_stdout_invoked()
|
||||
{
|
||||
exec >&6 2>&1
|
||||
}
|
||||
|
||||
#
|
||||
# redirect a command's output back to the compared output after the
|
||||
# test has restored its output
|
||||
# This undoes t_stdout_invokved, returning the test's stdout/err to the
|
||||
# output file as it was when it was launched.
|
||||
#
|
||||
t_compare_output()
|
||||
t_stdout_compare()
|
||||
{
|
||||
"$@" >&7 2>&1
|
||||
exec >&7 2>&1
|
||||
}
|
||||
|
||||
#
|
||||
|
||||
@@ -121,6 +121,7 @@ t_filter_dmesg()
|
||||
|
||||
# in debugging kernels we can slow things down a bit
|
||||
re="$re|hrtimer: interrupt took .*"
|
||||
re="$re|clocksource: Long readout interval"
|
||||
|
||||
# fencing tests force unmounts and trigger timeouts
|
||||
re="$re|scoutfs .* forcing unmount"
|
||||
@@ -166,6 +167,12 @@ t_filter_dmesg()
|
||||
# perf warning that it adjusted sample rate
|
||||
re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"
|
||||
|
||||
# some ci test guests are unresponsive
|
||||
re="$re|longest quorum heartbeat .* delay"
|
||||
|
||||
# creating block devices may trigger this
|
||||
re="$re|block device autoloading is deprecated and will be removed."
|
||||
|
||||
egrep -v "($re)" | \
|
||||
ignore_harmless_unwind_kasan_stack_oob
|
||||
}
|
||||
|
||||
@@ -283,6 +283,30 @@ t_reinsert_remount_all()
|
||||
t_quiet t_mount_all || t_fail "mounting all failed"
|
||||
}
|
||||
|
||||
#
|
||||
# scratch helpers
|
||||
#
|
||||
t_scratch_mkfs()
|
||||
{
|
||||
scoutfs mkfs -f -Q 0,127.0.0.1,$T_SCRATCH_PORT "$T_EX_META_DEV" "$T_EX_DATA_DEV" "$@" > $T_TMP.mkfs.out 2>&1 || \
|
||||
t_fail "scratch mkfs failed"
|
||||
}
|
||||
|
||||
t_scratch_mount()
|
||||
{
|
||||
mkdir -p "$T_MSCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$@" "$T_EX_DATA_DEV" "$T_MSCR" || \
|
||||
t_fail "scratch mount failed"
|
||||
}
|
||||
|
||||
t_scratch_umount()
|
||||
{
|
||||
umount "$T_MSCR" || \
|
||||
t_fail "scratch umount failed"
|
||||
rmdir "$T_MSCR"
|
||||
}
|
||||
|
||||
|
||||
t_trigger_path() {
|
||||
local nr="$1"
|
||||
|
||||
@@ -498,3 +522,121 @@ t_restore_all_sysfs_mount_options() {
|
||||
t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
|
||||
done
|
||||
}
|
||||
|
||||
t_force_log_merge() {
|
||||
local sv=$(t_server_nr)
|
||||
local merges_started
|
||||
local last_merges_started
|
||||
local merges_completed
|
||||
local last_merges_completed
|
||||
|
||||
while true; do
|
||||
last_merges_started=$(t_counter log_merge_start $sv)
|
||||
last_merges_completed=$(t_counter log_merge_complete $sv)
|
||||
|
||||
t_trigger_arm_silent log_merge_force_finalize_ours $sv
|
||||
|
||||
t_sync_seq_index
|
||||
|
||||
while test "$(t_trigger_get log_merge_force_finalize_ours $sv)" == "1"; do
|
||||
sleep .5
|
||||
done
|
||||
|
||||
merges_started=$(t_counter log_merge_start $sv)
|
||||
|
||||
if (( merges_started > last_merges_started )); then
|
||||
merges_completed=$(t_counter log_merge_complete $sv)
|
||||
|
||||
while (( merges_completed == last_merges_completed )); do
|
||||
sleep .5
|
||||
merges_completed=$(t_counter log_merge_complete $sv)
|
||||
done
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
declare -A _last_scan
|
||||
t_get_orphan_scan_runs() {
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
_last_scan[$i]=$(t_counter orphan_scan $i)
|
||||
done
|
||||
}
|
||||
|
||||
t_wait_for_orphan_scan_runs() {
|
||||
local i
|
||||
local scan
|
||||
|
||||
t_get_orphan_scan_runs
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
while true; do
|
||||
scan=$(t_counter orphan_scan $i)
|
||||
if (( scan != _last_scan[$i] )); then
|
||||
break
|
||||
fi
|
||||
sleep .5
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
declare -A _last_empty
|
||||
t_get_orphan_scan_empty() {
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
_last_empty[$i]=$(t_counter orphan_scan_empty $i)
|
||||
done
|
||||
}
|
||||
|
||||
t_wait_for_no_orphans() {
|
||||
local i;
|
||||
local working;
|
||||
local empty;
|
||||
|
||||
t_get_orphan_scan_empty
|
||||
|
||||
while true; do
|
||||
working=0
|
||||
|
||||
t_wait_for_orphan_scan_runs
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
empty=$(t_counter orphan_scan_empty $i)
|
||||
if (( empty == _last_empty[$i] )); then
|
||||
(( working++ ))
|
||||
else
|
||||
(( _last_empty[$i] = empty ))
|
||||
fi
|
||||
done
|
||||
|
||||
if (( working == 0 )); then
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
#
|
||||
# Repeatedly run the arguments as a command, sleeping in between, until
|
||||
# it returns success. The first argument is a relative timeout in
|
||||
# seconds. The remaining arguments are the command and its arguments.
|
||||
#
|
||||
# If the timeout expires without the command returning 0 then the test
|
||||
# fails.
|
||||
#
|
||||
t_wait_until_timeout() {
|
||||
local relative="$1"
|
||||
local expire="$((SECONDS + relative))"
|
||||
shift
|
||||
|
||||
while (( SECONDS < expire )); do
|
||||
"$@" && return
|
||||
sleep 1
|
||||
done
|
||||
|
||||
t_fail "command failed for $relative sec: $@"
|
||||
}
|
||||
|
||||
@@ -43,9 +43,14 @@ t_tap_progress()
|
||||
local testname=$1
|
||||
local result=$2
|
||||
|
||||
local stmsg=""
|
||||
local diff=""
|
||||
local dmsg=""
|
||||
|
||||
if [[ -s $T_RESULTS/tmp/${testname}/status.msg ]]; then
|
||||
stmsg="1"
|
||||
fi
|
||||
|
||||
if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
|
||||
dmsg="1"
|
||||
fi
|
||||
@@ -61,6 +66,7 @@ t_tap_progress()
|
||||
echo "# ${testname} ** skipped - permitted **"
|
||||
else
|
||||
echo "not ok ${i} - ${testname}"
|
||||
|
||||
case ${result} in
|
||||
101)
|
||||
echo "# ${testname} ** skipped **"
|
||||
@@ -70,6 +76,13 @@ t_tap_progress()
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n "${stmsg}" ]]; then
|
||||
echo "#"
|
||||
echo "# status:"
|
||||
echo "#"
|
||||
cat $T_RESULTS/tmp/${testname}/status.msg | sed 's/^/# - /'
|
||||
fi
|
||||
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "#"
|
||||
echo "# diff:"
|
||||
|
||||
6
tests/golden/basic-acl-consistency
Normal file
6
tests/golden/basic-acl-consistency
Normal file
@@ -0,0 +1,6 @@
|
||||
== make scratch fs
|
||||
== create uid/gids
|
||||
== set acls and permissions
|
||||
== compare output
|
||||
== drop caches and compare again
|
||||
== cleanup scratch fs
|
||||
@@ -17,7 +17,7 @@ ino not found in dseq index
|
||||
mount 0 contents after mount 1 rm: contents
|
||||
ino found in dseq index
|
||||
ino found in dseq index
|
||||
stat: cannot stat '/mnt/test/test/inode-deletion/file': No such file or directory
|
||||
stat: cannot stat '/mnt/test/test/inode-deletion/badfile': No such file or directory
|
||||
ino not found in dseq index
|
||||
ino not found in dseq index
|
||||
== lots of deletions use one open map
|
||||
|
||||
@@ -1,882 +0,0 @@
|
||||
Ran:
|
||||
generic/001
|
||||
generic/002
|
||||
generic/004
|
||||
generic/005
|
||||
generic/006
|
||||
generic/007
|
||||
generic/008
|
||||
generic/009
|
||||
generic/011
|
||||
generic/012
|
||||
generic/013
|
||||
generic/014
|
||||
generic/015
|
||||
generic/016
|
||||
generic/018
|
||||
generic/020
|
||||
generic/021
|
||||
generic/022
|
||||
generic/023
|
||||
generic/024
|
||||
generic/025
|
||||
generic/026
|
||||
generic/028
|
||||
generic/029
|
||||
generic/030
|
||||
generic/031
|
||||
generic/032
|
||||
generic/033
|
||||
generic/034
|
||||
generic/035
|
||||
generic/037
|
||||
generic/039
|
||||
generic/040
|
||||
generic/041
|
||||
generic/050
|
||||
generic/052
|
||||
generic/053
|
||||
generic/056
|
||||
generic/057
|
||||
generic/058
|
||||
generic/059
|
||||
generic/060
|
||||
generic/061
|
||||
generic/062
|
||||
generic/063
|
||||
generic/064
|
||||
generic/065
|
||||
generic/066
|
||||
generic/067
|
||||
generic/069
|
||||
generic/070
|
||||
generic/071
|
||||
generic/073
|
||||
generic/076
|
||||
generic/078
|
||||
generic/079
|
||||
generic/080
|
||||
generic/081
|
||||
generic/082
|
||||
generic/084
|
||||
generic/086
|
||||
generic/087
|
||||
generic/088
|
||||
generic/090
|
||||
generic/091
|
||||
generic/092
|
||||
generic/094
|
||||
generic/096
|
||||
generic/097
|
||||
generic/098
|
||||
generic/099
|
||||
generic/101
|
||||
generic/104
|
||||
generic/105
|
||||
generic/106
|
||||
generic/107
|
||||
generic/110
|
||||
generic/111
|
||||
generic/113
|
||||
generic/114
|
||||
generic/115
|
||||
generic/116
|
||||
generic/117
|
||||
generic/118
|
||||
generic/119
|
||||
generic/120
|
||||
generic/121
|
||||
generic/122
|
||||
generic/123
|
||||
generic/124
|
||||
generic/126
|
||||
generic/128
|
||||
generic/129
|
||||
generic/130
|
||||
generic/131
|
||||
generic/134
|
||||
generic/135
|
||||
generic/136
|
||||
generic/138
|
||||
generic/139
|
||||
generic/140
|
||||
generic/141
|
||||
generic/142
|
||||
generic/143
|
||||
generic/144
|
||||
generic/145
|
||||
generic/146
|
||||
generic/147
|
||||
generic/148
|
||||
generic/149
|
||||
generic/150
|
||||
generic/151
|
||||
generic/152
|
||||
generic/153
|
||||
generic/154
|
||||
generic/155
|
||||
generic/156
|
||||
generic/157
|
||||
generic/158
|
||||
generic/159
|
||||
generic/160
|
||||
generic/161
|
||||
generic/162
|
||||
generic/163
|
||||
generic/169
|
||||
generic/171
|
||||
generic/172
|
||||
generic/173
|
||||
generic/174
|
||||
generic/177
|
||||
generic/178
|
||||
generic/179
|
||||
generic/180
|
||||
generic/181
|
||||
generic/182
|
||||
generic/183
|
||||
generic/184
|
||||
generic/185
|
||||
generic/188
|
||||
generic/189
|
||||
generic/190
|
||||
generic/191
|
||||
generic/193
|
||||
generic/194
|
||||
generic/195
|
||||
generic/196
|
||||
generic/197
|
||||
generic/198
|
||||
generic/199
|
||||
generic/200
|
||||
generic/201
|
||||
generic/202
|
||||
generic/203
|
||||
generic/205
|
||||
generic/206
|
||||
generic/207
|
||||
generic/210
|
||||
generic/211
|
||||
generic/212
|
||||
generic/214
|
||||
generic/215
|
||||
generic/216
|
||||
generic/217
|
||||
generic/218
|
||||
generic/219
|
||||
generic/220
|
||||
generic/221
|
||||
generic/222
|
||||
generic/223
|
||||
generic/225
|
||||
generic/227
|
||||
generic/228
|
||||
generic/229
|
||||
generic/230
|
||||
generic/235
|
||||
generic/236
|
||||
generic/237
|
||||
generic/238
|
||||
generic/240
|
||||
generic/244
|
||||
generic/245
|
||||
generic/246
|
||||
generic/247
|
||||
generic/248
|
||||
generic/249
|
||||
generic/250
|
||||
generic/252
|
||||
generic/253
|
||||
generic/254
|
||||
generic/255
|
||||
generic/256
|
||||
generic/257
|
||||
generic/258
|
||||
generic/259
|
||||
generic/260
|
||||
generic/261
|
||||
generic/262
|
||||
generic/263
|
||||
generic/264
|
||||
generic/265
|
||||
generic/266
|
||||
generic/267
|
||||
generic/268
|
||||
generic/271
|
||||
generic/272
|
||||
generic/276
|
||||
generic/277
|
||||
generic/278
|
||||
generic/279
|
||||
generic/281
|
||||
generic/282
|
||||
generic/283
|
||||
generic/284
|
||||
generic/286
|
||||
generic/287
|
||||
generic/288
|
||||
generic/289
|
||||
generic/290
|
||||
generic/291
|
||||
generic/292
|
||||
generic/293
|
||||
generic/294
|
||||
generic/295
|
||||
generic/296
|
||||
generic/301
|
||||
generic/302
|
||||
generic/303
|
||||
generic/304
|
||||
generic/305
|
||||
generic/306
|
||||
generic/307
|
||||
generic/308
|
||||
generic/309
|
||||
generic/312
|
||||
generic/313
|
||||
generic/314
|
||||
generic/315
|
||||
generic/316
|
||||
generic/317
|
||||
generic/319
|
||||
generic/322
|
||||
generic/324
|
||||
generic/325
|
||||
generic/326
|
||||
generic/327
|
||||
generic/328
|
||||
generic/329
|
||||
generic/330
|
||||
generic/331
|
||||
generic/332
|
||||
generic/335
|
||||
generic/336
|
||||
generic/337
|
||||
generic/341
|
||||
generic/342
|
||||
generic/343
|
||||
generic/346
|
||||
generic/348
|
||||
generic/353
|
||||
generic/355
|
||||
generic/358
|
||||
generic/359
|
||||
generic/360
|
||||
generic/361
|
||||
generic/362
|
||||
generic/363
|
||||
generic/364
|
||||
generic/365
|
||||
generic/366
|
||||
generic/367
|
||||
generic/368
|
||||
generic/369
|
||||
generic/370
|
||||
generic/371
|
||||
generic/372
|
||||
generic/373
|
||||
generic/374
|
||||
generic/375
|
||||
generic/376
|
||||
generic/377
|
||||
generic/378
|
||||
generic/379
|
||||
generic/380
|
||||
generic/381
|
||||
generic/382
|
||||
generic/383
|
||||
generic/384
|
||||
generic/385
|
||||
generic/386
|
||||
generic/389
|
||||
generic/391
|
||||
generic/392
|
||||
generic/393
|
||||
generic/394
|
||||
generic/395
|
||||
generic/396
|
||||
generic/397
|
||||
generic/398
|
||||
generic/400
|
||||
generic/401
|
||||
generic/402
|
||||
generic/403
|
||||
generic/404
|
||||
generic/406
|
||||
generic/407
|
||||
generic/408
|
||||
generic/412
|
||||
generic/413
|
||||
generic/414
|
||||
generic/417
|
||||
generic/419
|
||||
generic/420
|
||||
generic/421
|
||||
generic/422
|
||||
generic/424
|
||||
generic/425
|
||||
generic/426
|
||||
generic/427
|
||||
generic/428
|
||||
generic/436
|
||||
generic/437
|
||||
generic/439
|
||||
generic/440
|
||||
generic/443
|
||||
generic/445
|
||||
generic/446
|
||||
generic/448
|
||||
generic/449
|
||||
generic/450
|
||||
generic/451
|
||||
generic/452
|
||||
generic/453
|
||||
generic/454
|
||||
generic/456
|
||||
generic/458
|
||||
generic/460
|
||||
generic/462
|
||||
generic/463
|
||||
generic/465
|
||||
generic/466
|
||||
generic/468
|
||||
generic/469
|
||||
generic/470
|
||||
generic/471
|
||||
generic/474
|
||||
generic/477
|
||||
generic/478
|
||||
generic/479
|
||||
generic/480
|
||||
generic/481
|
||||
generic/483
|
||||
generic/485
|
||||
generic/486
|
||||
generic/487
|
||||
generic/488
|
||||
generic/489
|
||||
generic/490
|
||||
generic/491
|
||||
generic/492
|
||||
generic/498
|
||||
generic/499
|
||||
generic/501
|
||||
generic/502
|
||||
generic/503
|
||||
generic/504
|
||||
generic/505
|
||||
generic/506
|
||||
generic/507
|
||||
generic/508
|
||||
generic/509
|
||||
generic/510
|
||||
generic/511
|
||||
generic/512
|
||||
generic/513
|
||||
generic/514
|
||||
generic/515
|
||||
generic/516
|
||||
generic/517
|
||||
generic/518
|
||||
generic/519
|
||||
generic/520
|
||||
generic/523
|
||||
generic/524
|
||||
generic/525
|
||||
generic/526
|
||||
generic/527
|
||||
generic/528
|
||||
generic/529
|
||||
generic/530
|
||||
generic/531
|
||||
generic/533
|
||||
generic/534
|
||||
generic/535
|
||||
generic/536
|
||||
generic/537
|
||||
generic/538
|
||||
generic/539
|
||||
generic/540
|
||||
generic/541
|
||||
generic/542
|
||||
generic/543
|
||||
generic/544
|
||||
generic/545
|
||||
generic/546
|
||||
generic/547
|
||||
generic/548
|
||||
generic/549
|
||||
generic/550
|
||||
generic/552
|
||||
generic/553
|
||||
generic/555
|
||||
generic/556
|
||||
generic/557
|
||||
generic/566
|
||||
generic/567
|
||||
generic/571
|
||||
generic/572
|
||||
generic/573
|
||||
generic/574
|
||||
generic/575
|
||||
generic/576
|
||||
generic/577
|
||||
generic/578
|
||||
generic/580
|
||||
generic/581
|
||||
generic/582
|
||||
generic/583
|
||||
generic/584
|
||||
generic/586
|
||||
generic/587
|
||||
generic/588
|
||||
generic/591
|
||||
generic/592
|
||||
generic/593
|
||||
generic/594
|
||||
generic/595
|
||||
generic/596
|
||||
generic/597
|
||||
generic/598
|
||||
generic/599
|
||||
generic/600
|
||||
generic/601
|
||||
generic/602
|
||||
generic/603
|
||||
generic/604
|
||||
generic/605
|
||||
generic/606
|
||||
generic/607
|
||||
generic/608
|
||||
generic/609
|
||||
generic/610
|
||||
generic/611
|
||||
generic/612
|
||||
generic/613
|
||||
generic/614
|
||||
generic/618
|
||||
generic/621
|
||||
generic/623
|
||||
generic/624
|
||||
generic/625
|
||||
generic/626
|
||||
generic/628
|
||||
generic/629
|
||||
generic/630
|
||||
generic/632
|
||||
generic/634
|
||||
generic/635
|
||||
generic/637
|
||||
generic/638
|
||||
generic/639
|
||||
generic/640
|
||||
generic/644
|
||||
generic/645
|
||||
generic/646
|
||||
generic/647
|
||||
generic/651
|
||||
generic/652
|
||||
generic/653
|
||||
generic/654
|
||||
generic/655
|
||||
generic/657
|
||||
generic/658
|
||||
generic/659
|
||||
generic/660
|
||||
generic/661
|
||||
generic/662
|
||||
generic/663
|
||||
generic/664
|
||||
generic/665
|
||||
generic/666
|
||||
generic/667
|
||||
generic/668
|
||||
generic/669
|
||||
generic/673
|
||||
generic/674
|
||||
generic/675
|
||||
generic/676
|
||||
generic/677
|
||||
generic/678
|
||||
generic/679
|
||||
generic/680
|
||||
generic/681
|
||||
generic/682
|
||||
generic/683
|
||||
generic/684
|
||||
generic/685
|
||||
generic/686
|
||||
generic/687
|
||||
generic/688
|
||||
generic/689
|
||||
shared/002
|
||||
shared/032
|
||||
Not
|
||||
run:
|
||||
generic/008
|
||||
generic/009
|
||||
generic/012
|
||||
generic/015
|
||||
generic/016
|
||||
generic/018
|
||||
generic/021
|
||||
generic/022
|
||||
generic/025
|
||||
generic/026
|
||||
generic/031
|
||||
generic/033
|
||||
generic/050
|
||||
generic/052
|
||||
generic/058
|
||||
generic/059
|
||||
generic/060
|
||||
generic/061
|
||||
generic/063
|
||||
generic/064
|
||||
generic/078
|
||||
generic/079
|
||||
generic/081
|
||||
generic/082
|
||||
generic/091
|
||||
generic/094
|
||||
generic/096
|
||||
generic/110
|
||||
generic/111
|
||||
generic/113
|
||||
generic/114
|
||||
generic/115
|
||||
generic/116
|
||||
generic/118
|
||||
generic/119
|
||||
generic/121
|
||||
generic/122
|
||||
generic/123
|
||||
generic/128
|
||||
generic/130
|
||||
generic/134
|
||||
generic/135
|
||||
generic/136
|
||||
generic/138
|
||||
generic/139
|
||||
generic/140
|
||||
generic/142
|
||||
generic/143
|
||||
generic/144
|
||||
generic/145
|
||||
generic/146
|
||||
generic/147
|
||||
generic/148
|
||||
generic/149
|
||||
generic/150
|
||||
generic/151
|
||||
generic/152
|
||||
generic/153
|
||||
generic/154
|
||||
generic/155
|
||||
generic/156
|
||||
generic/157
|
||||
generic/158
|
||||
generic/159
|
||||
generic/160
|
||||
generic/161
|
||||
generic/162
|
||||
generic/163
|
||||
generic/171
|
||||
generic/172
|
||||
generic/173
|
||||
generic/174
|
||||
generic/177
|
||||
generic/178
|
||||
generic/179
|
||||
generic/180
|
||||
generic/181
|
||||
generic/182
|
||||
generic/183
|
||||
generic/185
|
||||
generic/188
|
||||
generic/189
|
||||
generic/190
|
||||
generic/191
|
||||
generic/193
|
||||
generic/194
|
||||
generic/195
|
||||
generic/196
|
||||
generic/197
|
||||
generic/198
|
||||
generic/199
|
||||
generic/200
|
||||
generic/201
|
||||
generic/202
|
||||
generic/203
|
||||
generic/205
|
||||
generic/206
|
||||
generic/207
|
||||
generic/210
|
||||
generic/211
|
||||
generic/212
|
||||
generic/214
|
||||
generic/216
|
||||
generic/217
|
||||
generic/218
|
||||
generic/219
|
||||
generic/220
|
||||
generic/222
|
||||
generic/223
|
||||
generic/225
|
||||
generic/227
|
||||
generic/229
|
||||
generic/230
|
||||
generic/235
|
||||
generic/238
|
||||
generic/240
|
||||
generic/244
|
||||
generic/250
|
||||
generic/252
|
||||
generic/253
|
||||
generic/254
|
||||
generic/255
|
||||
generic/256
|
||||
generic/259
|
||||
generic/260
|
||||
generic/261
|
||||
generic/262
|
||||
generic/263
|
||||
generic/264
|
||||
generic/265
|
||||
generic/266
|
||||
generic/267
|
||||
generic/268
|
||||
generic/271
|
||||
generic/272
|
||||
generic/276
|
||||
generic/277
|
||||
generic/278
|
||||
generic/279
|
||||
generic/281
|
||||
generic/282
|
||||
generic/283
|
||||
generic/284
|
||||
generic/287
|
||||
generic/288
|
||||
generic/289
|
||||
generic/290
|
||||
generic/291
|
||||
generic/292
|
||||
generic/293
|
||||
generic/295
|
||||
generic/296
|
||||
generic/301
|
||||
generic/302
|
||||
generic/303
|
||||
generic/304
|
||||
generic/305
|
||||
generic/312
|
||||
generic/314
|
||||
generic/316
|
||||
generic/317
|
||||
generic/324
|
||||
generic/326
|
||||
generic/327
|
||||
generic/328
|
||||
generic/329
|
||||
generic/330
|
||||
generic/331
|
||||
generic/332
|
||||
generic/353
|
||||
generic/355
|
||||
generic/358
|
||||
generic/359
|
||||
generic/361
|
||||
generic/362
|
||||
generic/363
|
||||
generic/364
|
||||
generic/365
|
||||
generic/366
|
||||
generic/367
|
||||
generic/368
|
||||
generic/369
|
||||
generic/370
|
||||
generic/371
|
||||
generic/372
|
||||
generic/373
|
||||
generic/374
|
||||
generic/378
|
||||
generic/379
|
||||
generic/380
|
||||
generic/381
|
||||
generic/382
|
||||
generic/383
|
||||
generic/384
|
||||
generic/385
|
||||
generic/386
|
||||
generic/391
|
||||
generic/392
|
||||
generic/395
|
||||
generic/396
|
||||
generic/397
|
||||
generic/398
|
||||
generic/400
|
||||
generic/402
|
||||
generic/404
|
||||
generic/406
|
||||
generic/407
|
||||
generic/408
|
||||
generic/412
|
||||
generic/413
|
||||
generic/414
|
||||
generic/417
|
||||
generic/419
|
||||
generic/420
|
||||
generic/421
|
||||
generic/422
|
||||
generic/424
|
||||
generic/425
|
||||
generic/427
|
||||
generic/439
|
||||
generic/440
|
||||
generic/446
|
||||
generic/449
|
||||
generic/450
|
||||
generic/451
|
||||
generic/453
|
||||
generic/454
|
||||
generic/456
|
||||
generic/458
|
||||
generic/462
|
||||
generic/463
|
||||
generic/465
|
||||
generic/466
|
||||
generic/468
|
||||
generic/469
|
||||
generic/470
|
||||
generic/471
|
||||
generic/474
|
||||
generic/485
|
||||
generic/487
|
||||
generic/488
|
||||
generic/491
|
||||
generic/492
|
||||
generic/499
|
||||
generic/501
|
||||
generic/503
|
||||
generic/505
|
||||
generic/506
|
||||
generic/507
|
||||
generic/508
|
||||
generic/511
|
||||
generic/513
|
||||
generic/514
|
||||
generic/515
|
||||
generic/516
|
||||
generic/517
|
||||
generic/518
|
||||
generic/519
|
||||
generic/520
|
||||
generic/528
|
||||
generic/530
|
||||
generic/536
|
||||
generic/537
|
||||
generic/538
|
||||
generic/539
|
||||
generic/540
|
||||
generic/541
|
||||
generic/542
|
||||
generic/543
|
||||
generic/544
|
||||
generic/545
|
||||
generic/546
|
||||
generic/548
|
||||
generic/549
|
||||
generic/550
|
||||
generic/552
|
||||
generic/553
|
||||
generic/555
|
||||
generic/556
|
||||
generic/566
|
||||
generic/567
|
||||
generic/572
|
||||
generic/573
|
||||
generic/574
|
||||
generic/575
|
||||
generic/576
|
||||
generic/577
|
||||
generic/578
|
||||
generic/580
|
||||
generic/581
|
||||
generic/582
|
||||
generic/583
|
||||
generic/584
|
||||
generic/586
|
||||
generic/587
|
||||
generic/588
|
||||
generic/591
|
||||
generic/592
|
||||
generic/593
|
||||
generic/594
|
||||
generic/595
|
||||
generic/596
|
||||
generic/597
|
||||
generic/598
|
||||
generic/599
|
||||
generic/600
|
||||
generic/601
|
||||
generic/602
|
||||
generic/603
|
||||
generic/605
|
||||
generic/606
|
||||
generic/607
|
||||
generic/608
|
||||
generic/609
|
||||
generic/610
|
||||
generic/612
|
||||
generic/613
|
||||
generic/621
|
||||
generic/623
|
||||
generic/624
|
||||
generic/625
|
||||
generic/626
|
||||
generic/628
|
||||
generic/629
|
||||
generic/630
|
||||
generic/635
|
||||
generic/644
|
||||
generic/645
|
||||
generic/646
|
||||
generic/647
|
||||
generic/651
|
||||
generic/652
|
||||
generic/653
|
||||
generic/654
|
||||
generic/655
|
||||
generic/657
|
||||
generic/658
|
||||
generic/659
|
||||
generic/660
|
||||
generic/661
|
||||
generic/662
|
||||
generic/663
|
||||
generic/664
|
||||
generic/665
|
||||
generic/666
|
||||
generic/667
|
||||
generic/668
|
||||
generic/669
|
||||
generic/673
|
||||
generic/674
|
||||
generic/675
|
||||
generic/677
|
||||
generic/678
|
||||
generic/679
|
||||
generic/680
|
||||
generic/681
|
||||
generic/682
|
||||
generic/683
|
||||
generic/684
|
||||
generic/685
|
||||
generic/686
|
||||
generic/687
|
||||
generic/688
|
||||
generic/689
|
||||
shared/002
|
||||
shared/032
|
||||
Passed all 512 tests
|
||||
|
||||
@@ -56,6 +56,7 @@ $(basename $0) options:
|
||||
| only tests matching will be run. Can be provided multiple
|
||||
| times
|
||||
-i | Force removing and inserting the built scoutfs.ko module.
|
||||
-l <nr> | Loop each test <nr> times while passing, last run counts.
|
||||
-M <file> | Specify the filesystem's meta data device path that contains
|
||||
| the file system to be tested. Will be clobbered by -m mkfs.
|
||||
-m | Run mkfs on the device before mounting and running
|
||||
@@ -91,6 +92,7 @@ done
|
||||
T_TRACE_DUMP="0"
|
||||
T_TRACE_PRINTK="0"
|
||||
T_PORT_START="19700"
|
||||
T_LOOP_ITER="1"
|
||||
|
||||
# array declarations to be able to use array ops
|
||||
declare -a T_TRACE_GLOB
|
||||
@@ -131,6 +133,12 @@ while true; do
|
||||
-i)
|
||||
T_INSMOD="1"
|
||||
;;
|
||||
-l)
|
||||
test -n "$2" || die "-l must have a nr iterations argument"
|
||||
test "$2" -eq "$2" 2>/dev/null || die "-l <nr> argument must be an integer"
|
||||
T_LOOP_ITER="$2"
|
||||
shift
|
||||
;;
|
||||
-M)
|
||||
test -n "$2" || die "-z must have meta device file argument"
|
||||
T_META_DEVICE="$2"
|
||||
@@ -375,7 +383,7 @@ fi
|
||||
quo=""
|
||||
if [ -n "$T_MKFS" ]; then
|
||||
for i in $(seq -0 $((T_QUORUM - 1))); do
|
||||
quo="$quo -Q $i,127.0.0.1,$((T_TEST_PORT + i))"
|
||||
quo="$quo -Q $i,::1,$((T_TEST_PORT + i))"
|
||||
done
|
||||
|
||||
msg "making new filesystem with $T_QUORUM quorum members"
|
||||
@@ -392,7 +400,8 @@ if [ -n "$T_INSMOD" ]; then
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_MULT" ]; then
|
||||
orig_trace_size=$(cat /sys/kernel/debug/tracing/buffer_size_kb)
|
||||
# orig_trace_size=$(cat /sys/kernel/debug/tracing/buffer_size_kb)
|
||||
orig_trace_size=1408
|
||||
mult_trace_size=$((orig_trace_size * T_TRACE_MULT))
|
||||
msg "increasing trace buffer size from $orig_trace_size KiB to $mult_trace_size KiB"
|
||||
echo $mult_trace_size > /sys/kernel/debug/tracing/buffer_size_kb
|
||||
@@ -430,6 +439,30 @@ cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
|
||||
/sys/kernel/debug/tracing/buffer_size_kb \
|
||||
/proc/sys/kernel/ftrace_dump_on_oops
|
||||
|
||||
# we can record pids to kill as we exit, we kill in reverse added order
|
||||
atexit_kill_pids=""
|
||||
add_atexit_kill_pid()
|
||||
{
|
||||
atexit_kill_pids="$1 $atexit_kill_pids"
|
||||
}
|
||||
atexit_kill()
|
||||
{
|
||||
local pid
|
||||
|
||||
# suppress bg function exited messages
|
||||
exec {ERR}>&2 2>/dev/null
|
||||
|
||||
for pid in $atexit_kill_pids; do
|
||||
if test -e "/proc/$pid/status" ; then
|
||||
kill "$pid"
|
||||
wait "$pid"
|
||||
fi
|
||||
done
|
||||
|
||||
exec 2>&$ERR {ERR}>&-
|
||||
}
|
||||
trap atexit_kill EXIT
|
||||
|
||||
#
|
||||
# Build a fenced config that runs scripts out of the repository rather
|
||||
# than the default system directory
|
||||
@@ -443,26 +476,46 @@ EOF
|
||||
export SCOUTFS_FENCED_CONFIG_FILE="$conf"
|
||||
T_FENCED_LOG="$T_RESULTS/fenced.log"
|
||||
|
||||
#
|
||||
# Run the agent in the background, log its output, an kill it if we
|
||||
# exit
|
||||
#
|
||||
fenced_log()
|
||||
{
|
||||
echo "[$(timestamp)] $*" >> "$T_FENCED_LOG"
|
||||
}
|
||||
fenced_pid=""
|
||||
kill_fenced()
|
||||
{
|
||||
if test -n "$fenced_pid" -a -d "/proc/$fenced_pid" ; then
|
||||
fenced_log "killing fenced pid $fenced_pid"
|
||||
kill "$fenced_pid"
|
||||
fi
|
||||
}
|
||||
trap kill_fenced EXIT
|
||||
$T_UTILS/fenced/scoutfs-fenced > "$T_FENCED_LOG" 2>&1 &
|
||||
fenced_pid=$!
|
||||
fenced_log "started fenced pid $fenced_pid in the background"
|
||||
add_atexit_kill_pid $fenced_pid
|
||||
|
||||
#
|
||||
# some critical failures will cause fs operations to hang. We can watch
|
||||
# for evidence of them and cause the system to crash, at least.
|
||||
#
|
||||
crash_monitor()
|
||||
{
|
||||
local bad=0
|
||||
|
||||
while sleep 1; do
|
||||
if dmesg | grep -q "inserting extent.*overlaps existing"; then
|
||||
echo "run-tests monitor saw overlapping extent message"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if dmesg | grep -q "error indicated by fence action" ; then
|
||||
echo "run-tests monitor saw fence agent error message"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if [ ! -e "/proc/${fenced_pid}/status" ]; then
|
||||
echo "run-tests monitor didn't see fenced pid $fenced_pid /proc dir"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if [ "$bad" != 0 ]; then
|
||||
echo "run-tests monitor syncing and triggering crash"
|
||||
# hail mary, the sync could well hang
|
||||
(echo s > /proc/sysrq-trigger) &
|
||||
sleep 5
|
||||
echo c > /proc/sysrq-trigger
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
crash_monitor &
|
||||
add_atexit_kill_pid $!
|
||||
|
||||
# setup dm tables
|
||||
echo "0 $(blockdev --getsz $T_META_DEVICE) linear $T_META_DEVICE 0" > \
|
||||
@@ -535,7 +588,7 @@ fi
|
||||
. funcs/filter.sh
|
||||
|
||||
# give tests access to built binaries in src/, prefer over installed
|
||||
PATH="$PWD/src:$PATH"
|
||||
export PATH="$PWD/src:$PATH"
|
||||
|
||||
msg "running tests"
|
||||
> "$T_RESULTS/skip.log"
|
||||
@@ -555,101 +608,113 @@ for t in $tests; do
|
||||
t="tests/$t"
|
||||
test_name=$(basename "$t" | sed -e 's/.sh$//')
|
||||
|
||||
# create a temporary dir and file path for the test
|
||||
T_TMPDIR="$T_RESULTS/tmp/$test_name"
|
||||
T_TMP="$T_TMPDIR/tmp"
|
||||
cmd rm -rf "$T_TMPDIR"
|
||||
cmd mkdir -p "$T_TMPDIR"
|
||||
|
||||
# create a test name dir in the fs, clean up old data as needed
|
||||
T_DS=""
|
||||
for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="${T_M[$i]}/test/$test_name"
|
||||
|
||||
test $i == 0 && (
|
||||
test -d "$dir" && cmd rm -rf "$dir"
|
||||
cmd mkdir -p "$dir"
|
||||
)
|
||||
|
||||
eval T_D$i=$dir
|
||||
T_D[$i]=$dir
|
||||
T_DS+="$dir "
|
||||
done
|
||||
|
||||
# export all our T_ variables
|
||||
for v in ${!T_*}; do
|
||||
eval export $v
|
||||
done
|
||||
export PATH # give test access to scoutfs binary
|
||||
|
||||
# prepare to compare output to golden output
|
||||
test -e "$T_RESULTS/output" || cmd mkdir -p "$T_RESULTS/output"
|
||||
out="$T_RESULTS/output/$test_name"
|
||||
> "$T_TMPDIR/status.msg"
|
||||
golden="golden/$test_name"
|
||||
|
||||
# get stats from previous pass
|
||||
last="$T_RESULTS/last-passed-test-stats"
|
||||
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
|
||||
test -n "$stats" && stats="last: $stats"
|
||||
|
||||
printf " %-30s $stats" "$test_name"
|
||||
|
||||
# mark in dmesg as to what test we are running
|
||||
echo "run scoutfs test $test_name" > /dev/kmsg
|
||||
|
||||
# record dmesg before
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.before"
|
||||
# let the test get at its extra files
|
||||
T_EXTRA="$T_TESTS/extra/$test_name"
|
||||
|
||||
# give tests stdout and compared output on specific fds
|
||||
exec 6>&1
|
||||
exec 7>$out
|
||||
for iter in $(seq 1 $T_LOOP_ITER); do
|
||||
|
||||
# run the test with access to our functions
|
||||
start_secs=$SECONDS
|
||||
bash -c "for f in funcs/*.sh; do . \$f; done; . $t" >&7 2>&1
|
||||
sts="$?"
|
||||
log "test $t exited with status $sts"
|
||||
stats="$((SECONDS - start_secs))s"
|
||||
# create a temporary dir and file path for the test
|
||||
T_TMPDIR="$T_RESULTS/tmp/$test_name"
|
||||
T_TMP="$T_TMPDIR/tmp"
|
||||
cmd rm -rf "$T_TMPDIR"
|
||||
cmd mkdir -p "$T_TMPDIR"
|
||||
|
||||
# close our weird descriptors
|
||||
exec 6>&-
|
||||
exec 7>&-
|
||||
# assign scratch mount point in temporary dir
|
||||
T_MSCR="$T_TMPDIR/scratch"
|
||||
|
||||
# compare output if the test returned passed status
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
if [ ! -e "$golden" ]; then
|
||||
message="no golden output"
|
||||
sts=$T_FAIL_STATUS
|
||||
elif ! cmp -s "$golden" "$out"; then
|
||||
message="output differs"
|
||||
sts=$T_FAIL_STATUS
|
||||
diff -u "$golden" "$out" >> "$T_RESULTS/fail.log"
|
||||
# create a test name dir in the fs, clean up old data as needed
|
||||
T_DS=""
|
||||
for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="${T_M[$i]}/test/$test_name"
|
||||
|
||||
test $i == 0 && (
|
||||
test -d "$dir" && cmd rm -rf "$dir"
|
||||
cmd mkdir -p "$dir"
|
||||
)
|
||||
|
||||
eval T_D$i=$dir
|
||||
T_D[$i]=$dir
|
||||
T_DS+="$dir "
|
||||
done
|
||||
|
||||
# export all our T_ variables
|
||||
for v in ${!T_*}; do
|
||||
eval export $v
|
||||
done
|
||||
|
||||
# prepare to compare output to golden output
|
||||
test -e "$T_RESULTS/output" || cmd mkdir -p "$T_RESULTS/output"
|
||||
out="$T_RESULTS/output/$test_name"
|
||||
> "$T_TMPDIR/status.msg"
|
||||
golden="golden/$test_name"
|
||||
|
||||
# record dmesg before
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.before"
|
||||
|
||||
# give tests stdout and compared output on specific fds
|
||||
exec 6>&1
|
||||
exec 7>$out
|
||||
|
||||
# run the test with access to our functions
|
||||
start_secs=$SECONDS
|
||||
bash -c "for f in funcs/*.sh; do . \$f; done; . $t" >&7 2>&1
|
||||
sts="$?"
|
||||
log "test $t exited with status $sts"
|
||||
stats="$((SECONDS - start_secs))s"
|
||||
|
||||
# close our weird descriptors
|
||||
exec 6>&-
|
||||
exec 7>&-
|
||||
|
||||
# compare output if the test returned passed status
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
if [ ! -e "$golden" ]; then
|
||||
message="no golden output"
|
||||
sts=$T_FAIL_STATUS
|
||||
elif ! cmp -s "$golden" "$out"; then
|
||||
message="output differs"
|
||||
sts=$T_FAIL_STATUS
|
||||
diff -u "$golden" "$out" >> "$T_RESULTS/fail.log"
|
||||
fi
|
||||
else
|
||||
# get message from t_*() functions
|
||||
message=$(cat "$T_TMPDIR/status.msg")
|
||||
fi
|
||||
else
|
||||
# get message from t_*() functions
|
||||
message=$(cat "$T_TMPDIR/status.msg")
|
||||
fi
|
||||
|
||||
# see if anything unexpected was added to dmesg
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.after"
|
||||
diff --old-line-format="" --unchanged-line-format="" \
|
||||
"$T_TMPDIR/dmesg.before" "$T_TMPDIR/dmesg.after" > \
|
||||
"$T_TMPDIR/dmesg.new"
|
||||
# see if anything unexpected was added to dmesg
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.after"
|
||||
diff --old-line-format="" --unchanged-line-format="" \
|
||||
"$T_TMPDIR/dmesg.before" "$T_TMPDIR/dmesg.after" > \
|
||||
"$T_TMPDIR/dmesg.new"
|
||||
|
||||
if [ -s "$T_TMPDIR/dmesg.new" ]; then
|
||||
message="unexpected messages in dmesg"
|
||||
sts=$T_FAIL_STATUS
|
||||
cat "$T_TMPDIR/dmesg.new" >> "$T_RESULTS/fail.log"
|
||||
if [ -s "$T_TMPDIR/dmesg.new" ]; then
|
||||
message="unexpected messages in dmesg"
|
||||
sts=$T_FAIL_STATUS
|
||||
cat "$T_TMPDIR/dmesg.new" >> "$T_RESULTS/fail.log"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# record unknown exit status
|
||||
if [ "$sts" -lt "$T_FIRST_STATUS" -o "$sts" -gt "$T_LAST_STATUS" ]; then
|
||||
message="unknown status: $sts"
|
||||
sts=$T_FAIL_STATUS
|
||||
fi
|
||||
# record unknown exit status
|
||||
if [ "$sts" -lt "$T_FIRST_STATUS" -o "$sts" -gt "$T_LAST_STATUS" ]; then
|
||||
message="unknown status: $sts"
|
||||
sts=$T_FAIL_STATUS
|
||||
fi
|
||||
|
||||
# stop looping if we didn't pass
|
||||
if [ "$sts" != "$T_PASS_STATUS" ]; then
|
||||
break;
|
||||
fi
|
||||
done
|
||||
|
||||
# show and record the result of the test
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
|
||||
@@ -2,6 +2,7 @@ export-get-name-parent.sh
|
||||
basic-block-counts.sh
|
||||
basic-bad-mounts.sh
|
||||
basic-posix-acl.sh
|
||||
basic-acl-consistency.sh
|
||||
inode-items-updated.sh
|
||||
simple-inode-index.sh
|
||||
simple-staging.sh
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/stat.h>
|
||||
#include <inttypes.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <stdlib.h>
|
||||
@@ -29,7 +30,7 @@
|
||||
#include <errno.h>
|
||||
|
||||
static int size = 0;
|
||||
static int count = 0; /* XXX make this duration instead */
|
||||
static int duration = 0;
|
||||
|
||||
struct thread_info {
|
||||
int nr;
|
||||
@@ -41,6 +42,8 @@ static void *run_test_func(void *ptr)
|
||||
void *buf = NULL;
|
||||
char *addr = NULL;
|
||||
struct thread_info *tinfo = ptr;
|
||||
uint64_t seconds = 0;
|
||||
struct timespec ts;
|
||||
int c = 0;
|
||||
int fd;
|
||||
ssize_t read, written, ret;
|
||||
@@ -61,9 +64,15 @@ static void *run_test_func(void *ptr)
|
||||
|
||||
usleep(100000); /* 0.1sec to allow all threads to start roughly at the same time */
|
||||
|
||||
clock_gettime(CLOCK_REALTIME, &ts); /* record start time */
|
||||
seconds = ts.tv_sec + duration;
|
||||
|
||||
for (;;) {
|
||||
if (++c > count)
|
||||
break;
|
||||
if (++c % 16 == 0) {
|
||||
clock_gettime(CLOCK_REALTIME, &ts);
|
||||
if (ts.tv_sec >= seconds)
|
||||
break;
|
||||
}
|
||||
|
||||
switch (rand() % 4) {
|
||||
case 0: /* pread */
|
||||
@@ -99,6 +108,8 @@ static void *run_test_func(void *ptr)
|
||||
memcpy(addr, buf, size); /* noerr */
|
||||
break;
|
||||
}
|
||||
|
||||
usleep(10000);
|
||||
}
|
||||
|
||||
munmap(addr, size);
|
||||
@@ -120,7 +131,7 @@ int main(int argc, char **argv)
|
||||
int i;
|
||||
|
||||
if (argc != 8) {
|
||||
fprintf(stderr, "%s requires 7 arguments - size count file1 file2 file3 file4 file5\n", argv[0]);
|
||||
fprintf(stderr, "%s requires 7 arguments - size duration file1 file2 file3 file4 file5\n", argv[0]);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
@@ -130,9 +141,9 @@ int main(int argc, char **argv)
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
count = atoi(argv[2]);
|
||||
if (count < 0) {
|
||||
fprintf(stderr, "invalid count, must be greater than 0\n");
|
||||
duration = atoi(argv[2]);
|
||||
if (duration < 0) {
|
||||
fprintf(stderr, "invalid duration, must be greater than or equal to 0\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
|
||||
117
tests/tests/basic-acl-consistency.sh
Normal file
117
tests/tests/basic-acl-consistency.sh
Normal file
@@ -0,0 +1,117 @@
|
||||
|
||||
#
|
||||
# Test basic clustered posix acl consistency.
|
||||
#
|
||||
|
||||
t_require_commands getfacl setfacl
|
||||
|
||||
GETFACL="getfacl --absolute-names"
|
||||
|
||||
filter_scratch() {
|
||||
sed "s@$T_MSCR@t_mscr@g"
|
||||
}
|
||||
|
||||
acl_compare()
|
||||
{
|
||||
diff -u - <($GETFACL $T_MSCR/data/dir_a/dir_b | filter_scratch) <<EOF1
|
||||
# file: t_mscr/data/dir_a/dir_b
|
||||
# owner: t_usr_3
|
||||
# group: t_grp_3
|
||||
# flags: -s-
|
||||
user::rwx
|
||||
group::rwx
|
||||
group:t_grp_2:r-x
|
||||
mask::rwx
|
||||
other::---
|
||||
default:user::rwx
|
||||
default:group::rwx
|
||||
default:group:t_grp_2:r-x
|
||||
default:group:t_grp_3:rwx
|
||||
default:mask::rwx
|
||||
default:other::---
|
||||
|
||||
EOF1
|
||||
|
||||
test $? -eq 0 || t_fail "dir_b differs"
|
||||
|
||||
diff -u - <($GETFACL -p $T_MSCR/data/dir_a/dir_b/dir_c/dir_d | filter_scratch) <<EOF3
|
||||
# file: t_mscr/data/dir_a/dir_b/dir_c/dir_d
|
||||
# owner: t_usr_1
|
||||
# group: t_grp_1
|
||||
# flags: -s-
|
||||
user::rwx
|
||||
group::rwx
|
||||
group:t_grp_2:r-x
|
||||
mask::rwx
|
||||
other::---
|
||||
default:user::rwx
|
||||
default:group::rwx
|
||||
default:group:t_grp_2:r-x
|
||||
default:group:t_grp_3:rwx
|
||||
default:mask::rwx
|
||||
default:other::---
|
||||
|
||||
EOF3
|
||||
test $? -eq 0 || t_fail "dir_d differs"
|
||||
|
||||
diff -u - <($GETFACL $T_MSCR/data/dir_a/dir_b/dir_c | filter_scratch) <<EOF2
|
||||
# file: t_mscr/data/dir_a/dir_b/dir_c
|
||||
# owner: t_usr_3
|
||||
# group: t_grp_2
|
||||
# flags: -s-
|
||||
user::rwx
|
||||
group::rwx
|
||||
group:t_grp_2:r-x
|
||||
mask::rwx
|
||||
other::---
|
||||
default:user::rwx
|
||||
default:group::rwx
|
||||
default:group:t_grp_2:r-x
|
||||
default:group:t_grp_3:rwx
|
||||
default:mask::rwx
|
||||
default:other::---
|
||||
|
||||
EOF2
|
||||
test $? -eq 0 || t_fail "dir_c differs"
|
||||
}
|
||||
echo "== make scratch fs"
|
||||
t_scratch_mkfs
|
||||
t_scratch_mount
|
||||
|
||||
rm -rf $T_MSCR/data
|
||||
|
||||
echo "== create uid/gids"
|
||||
groupadd -g 7101 t_grp_1 > /dev/null 2>&1
|
||||
useradd -g 7101 -u 7101 t_usr_1 > /dev/null 2>&1
|
||||
groupadd -g 7102 t_grp_2 > /dev/null 2>&1
|
||||
groupadd -g 7103 t_grp_3 > /dev/null 2>&1
|
||||
useradd -g 7103 -u 7103 t_usr_3 > /dev/null 2>&1
|
||||
|
||||
echo "== set acls and permissions"
|
||||
mkdir -p $T_MSCR/data/dir_a/dir_b
|
||||
chown t_usr_3:t_grp_3 $T_MSCR/data/dir_a/dir_b
|
||||
chmod 2770 $T_MSCR/data/dir_a/dir_b
|
||||
setfacl -m g:t_grp_2:rx $T_MSCR/data/dir_a/dir_b
|
||||
setfacl -m d:g:t_grp_2:rx $T_MSCR/data/dir_a/dir_b
|
||||
setfacl -m d:g:t_grp_3:rwx $T_MSCR/data/dir_a/dir_b
|
||||
|
||||
mkdir -p $T_MSCR/data/dir_a/dir_b/dir_c
|
||||
chown t_usr_3:t_grp_2 $T_MSCR/data/dir_a/dir_b/dir_c
|
||||
setfacl -x g:t_grp_3 $T_MSCR/data/dir_a/dir_b/dir_c
|
||||
|
||||
mkdir -p $T_MSCR/data/dir_a/dir_b/dir_c/dir_d
|
||||
chown t_usr_1:t_grp_1 $T_MSCR/data/dir_a/dir_b/dir_c/dir_d
|
||||
setfacl -x g:t_grp_3 $T_MSCR/data/dir_a/dir_b/dir_c/dir_d
|
||||
|
||||
echo "== compare output"
|
||||
acl_compare
|
||||
|
||||
echo "== drop caches and compare again"
|
||||
sync
|
||||
echo 3 > /proc/sys/vm/drop_caches
|
||||
acl_compare
|
||||
|
||||
echo "== cleanup scratch fs"
|
||||
t_scratch_umount
|
||||
|
||||
t_pass
|
||||
@@ -12,25 +12,22 @@ mount_fail()
|
||||
}
|
||||
|
||||
echo "== prepare devices, mount point, and logs"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
t_scratch_mkfs
|
||||
> $T_TMP.mount.out
|
||||
scoutfs mkfs -f -Q 0,127.0.0.1,$T_SCRATCH_PORT "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 \
|
||||
|| t_fail "mkfs failed"
|
||||
|
||||
echo "== bad devices, bad options"
|
||||
mount_fail -o _bad /dev/null /dev/null "$SCR"
|
||||
mount_fail -o _bad /dev/null /dev/null "$T_MSCR"
|
||||
|
||||
echo "== swapped devices"
|
||||
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$SCR"
|
||||
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$T_MSCR"
|
||||
|
||||
echo "== both meta devices"
|
||||
mount_fail -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$SCR"
|
||||
mount_fail -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_META_DEV" "$T_MSCR"
|
||||
|
||||
echo "== both data devices"
|
||||
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
mount_fail -o metadev_path=$T_EX_DATA_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
|
||||
echo "== good volume, bad option and good options"
|
||||
mount_fail -o _bad,metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
mount_fail -o _bad,metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -11,9 +11,8 @@ truncate -s $sz "$T_TMP.equal"
|
||||
truncate -s $large_sz "$T_TMP.large"
|
||||
|
||||
echo "== make scratch fs"
|
||||
t_quiet scoutfs mkfs -f -Q 0,127.0.0.1,$T_SCRATCH_PORT "$T_EX_META_DEV" "$T_EX_DATA_DEV"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
t_scratch_mkfs
|
||||
mkdir -p "$T_MSCR"
|
||||
|
||||
echo "== small new data device fails"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.small"
|
||||
@@ -23,13 +22,13 @@ t_rc scoutfs prepare-empty-data-device --check "$T_EX_META_DEV" "$T_TMP.small"
|
||||
t_rc scoutfs prepare-empty-data-device --check "$T_EX_META_DEV"
|
||||
|
||||
echo "== preparing while mounted fails"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
umount "$SCR"
|
||||
umount "$T_MSCR"
|
||||
|
||||
echo "== preparing without recovery fails"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
umount -f "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
umount -f "$T_MSCR"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
|
||||
echo "== check sees metadata errors"
|
||||
@@ -37,16 +36,16 @@ t_rc scoutfs prepare-empty-data-device --check "$T_EX_META_DEV"
|
||||
t_rc scoutfs prepare-empty-data-device --check "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
|
||||
echo "== preparing with file data fails"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
echo hi > "$SCR"/file
|
||||
umount "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
echo hi > "$T_MSCR"/file
|
||||
umount "$T_MSCR"
|
||||
scoutfs print "$T_EX_META_DEV" > "$T_TMP.print"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
|
||||
echo "== preparing after emptied"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$SCR"
|
||||
rm -f "$SCR"/file
|
||||
umount "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$T_EX_DATA_DEV" "$T_MSCR"
|
||||
rm -f "$T_MSCR"/file
|
||||
umount "$T_MSCR"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
|
||||
echo "== checks pass"
|
||||
@@ -55,22 +54,22 @@ t_rc scoutfs prepare-empty-data-device --check "$T_EX_META_DEV" "$T_TMP.equal"
|
||||
|
||||
echo "== using prepared"
|
||||
scr_loop=$(losetup --find --show "$T_TMP.equal")
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$scr_loop" "$SCR"
|
||||
touch "$SCR"/equal_prepared
|
||||
equal_tot=$(scoutfs statfs -s total_data_blocks -p "$SCR")
|
||||
umount "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$scr_loop" "$T_MSCR"
|
||||
touch "$T_MSCR"/equal_prepared
|
||||
equal_tot=$(scoutfs statfs -s total_data_blocks -p "$T_MSCR")
|
||||
umount "$T_MSCR"
|
||||
losetup -d "$scr_loop"
|
||||
|
||||
echo "== preparing larger and resizing"
|
||||
t_rc scoutfs prepare-empty-data-device "$T_EX_META_DEV" "$T_TMP.large"
|
||||
scr_loop=$(losetup --find --show "$T_TMP.large")
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$scr_loop" "$SCR"
|
||||
touch "$SCR"/large_prepared
|
||||
ls "$SCR"
|
||||
scoutfs resize-devices -p "$SCR" -d $large_sz
|
||||
large_tot=$(scoutfs statfs -s total_data_blocks -p "$SCR")
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 "$scr_loop" "$T_MSCR"
|
||||
touch "$T_MSCR"/large_prepared
|
||||
ls "$T_MSCR"
|
||||
scoutfs resize-devices -p "$T_MSCR" -d $large_sz
|
||||
large_tot=$(scoutfs statfs -s total_data_blocks -p "$T_MSCR")
|
||||
test "$large_tot" -gt "$equal_tot" ; echo "resized larger test rc: $?"
|
||||
umount "$SCR"
|
||||
umount "$T_MSCR"
|
||||
losetup -d "$scr_loop"
|
||||
|
||||
echo "== cleanup"
|
||||
|
||||
@@ -54,21 +54,16 @@ after=$(free_blocks Data "$T_M0")
|
||||
test "$before" == "$after" || \
|
||||
t_fail "$after free data blocks after rm, expected $before"
|
||||
|
||||
# XXX this is all pretty manual, would be nice to have helpers
|
||||
echo "== make small meta fs"
|
||||
# meta device just big enough for reserves and the metadata we'll fill
|
||||
scoutfs mkfs -A -f -Q 0,127.0.0.1,$T_SCRATCH_PORT -m 10G "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 || \
|
||||
t_fail "mkfs failed"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 \
|
||||
"$T_EX_DATA_DEV" "$SCR"
|
||||
t_scratch_mkfs -A -m 10G
|
||||
t_scratch_mount
|
||||
|
||||
echo "== create large xattrs until we fill up metadata"
|
||||
mkdir -p "$SCR/xattrs"
|
||||
mkdir -p "$T_MSCR/xattrs"
|
||||
|
||||
for f in $(seq 1 100000); do
|
||||
file="$SCR/xattrs/file-$f"
|
||||
file="$T_MSCR/xattrs/file-$f"
|
||||
touch "$file"
|
||||
|
||||
LC_ALL=C create_xattr_loop -c 1000 -n user.scoutfs-enospc -p "$file" -s 65535 > $T_TMP.cxl 2>&1
|
||||
@@ -84,10 +79,10 @@ for f in $(seq 1 100000); do
|
||||
done
|
||||
|
||||
echo "== remove files with xattrs after enospc"
|
||||
rm -rf "$SCR/xattrs"
|
||||
rm -rf "$T_MSCR/xattrs"
|
||||
|
||||
echo "== make sure we can create again"
|
||||
file="$SCR/file-after"
|
||||
file="$T_MSCR/file-after"
|
||||
C=120
|
||||
while (( C-- )); do
|
||||
touch $file 2> /dev/null && break
|
||||
@@ -99,7 +94,6 @@ sync
|
||||
rm -f "$file"
|
||||
|
||||
echo "== cleanup small meta fs"
|
||||
umount "$SCR"
|
||||
rmdir "$SCR"
|
||||
t_scratch_umount
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -5,6 +5,9 @@
|
||||
t_require_commands sleep touch grep sync scoutfs
|
||||
t_require_mounts 2
|
||||
|
||||
# regularly see ~20/~30s
|
||||
VERIFY_TIMEOUT_SECS=90
|
||||
|
||||
#
|
||||
# Make sure that all mounts can read the results of a write from each
|
||||
# mount.
|
||||
@@ -40,8 +43,10 @@ verify_fenced_run()
|
||||
|
||||
for rid in $rids; do
|
||||
grep -q ".* running rid '$rid'.* args 'ignored run args'" "$T_FENCED_LOG" || \
|
||||
t_fail "fenced didn't execute RUN script for rid $rid"
|
||||
return 1
|
||||
done
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
echo "== make sure all mounts can see each other"
|
||||
@@ -54,14 +59,7 @@ rid=$(t_mount_rid $cl)
|
||||
echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"
|
||||
sync
|
||||
t_force_umount $cl
|
||||
# wait for client reconnection to timeout
|
||||
while grep -q $rid $(t_debugfs_path $sv)/connections; do
|
||||
sleep .5
|
||||
done
|
||||
while t_rid_is_fencing $rid; do
|
||||
sleep .5
|
||||
done
|
||||
verify_fenced_run $rid
|
||||
t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rid
|
||||
t_mount $cl
|
||||
check_read_write
|
||||
|
||||
@@ -83,15 +81,7 @@ for cl in $(t_fs_nrs); do
|
||||
t_force_umount $cl
|
||||
done
|
||||
|
||||
# wait for all client reconnections to timeout
|
||||
while egrep -q "($pattern)" $(t_debugfs_path $sv)/connections; do
|
||||
sleep .5
|
||||
done
|
||||
# wait for all fence requests to complete
|
||||
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
|
||||
sleep .5
|
||||
done
|
||||
verify_fenced_run $rids
|
||||
t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rids
|
||||
# remount all the clients
|
||||
for cl in $(t_fs_nrs); do
|
||||
if [ $cl == $sv ]; then
|
||||
@@ -107,12 +97,7 @@ rid=$(t_mount_rid $sv)
|
||||
echo "sv $sv rid $rid" >> "$T_TMP.log"
|
||||
sync
|
||||
t_force_umount $sv
|
||||
t_wait_for_leader
|
||||
# wait until new server is done fencing unmounted leader rid
|
||||
while t_rid_is_fencing $rid; do
|
||||
sleep .5
|
||||
done
|
||||
verify_fenced_run $rid
|
||||
t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rid
|
||||
t_mount $sv
|
||||
check_read_write
|
||||
|
||||
@@ -127,11 +112,7 @@ for nr in $(t_fs_nrs); do
|
||||
t_force_umount $nr
|
||||
done
|
||||
t_mount_all
|
||||
# wait for all fence requests to complete
|
||||
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
|
||||
sleep .5
|
||||
done
|
||||
verify_fenced_run $rids
|
||||
t_wait_until_timeout $VERIFY_TIMEOUT_SECS verify_fenced_run $rids
|
||||
check_read_write
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -72,7 +72,7 @@ touch $T_D0/dir/file
|
||||
mkdir $T_D0/dir/dir
|
||||
ln -s $T_D0/dir/file $T_D0/dir/symlink
|
||||
mknod $T_D0/dir/char c 1 3 # null
|
||||
mknod $T_D0/dir/block b 7 0 # loop0
|
||||
mknod $T_D0/dir/block b 42 0 # SAMPLE block dev - nonexistant/demo use only number
|
||||
for name in $(ls -UA $T_D0/dir | sort); do
|
||||
ino=$(stat -c '%i' $T_D0/dir/$name)
|
||||
$GRE $ino | filter_types
|
||||
|
||||
@@ -61,18 +61,28 @@ rm -f "$T_D1/file"
|
||||
check_ino_index "$ino" "$dseq" "$T_M0"
|
||||
check_ino_index "$ino" "$dseq" "$T_M1"
|
||||
|
||||
# Hurry along the orphan scanners. If any are currently asleep, we will
|
||||
# have to wait at least their current scan interval before they wake up,
|
||||
# run, and notice their new interval.
|
||||
t_save_all_sysfs_mount_options orphan_scan_delay_ms
|
||||
t_set_all_sysfs_mount_options orphan_scan_delay_ms 500
|
||||
t_wait_for_orphan_scan_runs
|
||||
|
||||
echo "== unlink wait for open on other mount"
|
||||
echo "contents" > "$T_D0/file"
|
||||
ino=$(stat -c "%i" "$T_D0/file")
|
||||
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
|
||||
exec {FD}<"$T_D0/file"
|
||||
rm -f "$T_D1/file"
|
||||
echo "contents" > "$T_D0/badfile"
|
||||
ino=$(stat -c "%i" "$T_D0/badfile")
|
||||
dseq=$(scoutfs stat -s data_seq "$T_D0/badfile")
|
||||
exec {FD}<"$T_D0/badfile"
|
||||
rm -f "$T_D1/badfile"
|
||||
echo "mount 0 contents after mount 1 rm: $(cat <&$FD)"
|
||||
check_ino_index "$ino" "$dseq" "$T_M0"
|
||||
check_ino_index "$ino" "$dseq" "$T_M1"
|
||||
exec {FD}>&- # close
|
||||
# we know that revalidating will unhash the remote dentry
|
||||
stat "$T_D0/file" 2>&1 | sed 's/cannot statx/cannot stat/' | t_filter_fs
|
||||
stat "$T_D0/badfile" 2>&1 | sed 's/cannot statx/cannot stat/' | t_filter_fs
|
||||
t_force_log_merge
|
||||
# wait for orphan scanners to pick up the unlinked inode and become idle
|
||||
t_wait_for_no_orphans
|
||||
check_ino_index "$ino" "$dseq" "$T_M0"
|
||||
check_ino_index "$ino" "$dseq" "$T_M1"
|
||||
|
||||
@@ -83,16 +93,20 @@ rm -f "$T_D0/dir"/files-*
|
||||
rmdir "$T_D0/dir"
|
||||
|
||||
echo "== open files survive remote scanning orphans"
|
||||
echo "contents" > "$T_D0/file"
|
||||
ino=$(stat -c "%i" "$T_D0/file")
|
||||
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
|
||||
exec {FD}<"$T_D0/file"
|
||||
rm -f "$T_D0/file"
|
||||
echo "contents" > "$T_D0/lastfile"
|
||||
ino=$(stat -c "%i" "$T_D0/lastfile")
|
||||
dseq=$(scoutfs stat -s data_seq "$T_D0/lastfile")
|
||||
exec {FD}<"$T_D0/lastfile"
|
||||
rm -f "$T_D0/lastfile"
|
||||
t_umount 1
|
||||
t_mount 1
|
||||
echo "mount 0 contents after mount 1 remounted: $(cat <&$FD)"
|
||||
exec {FD}>&- # close
|
||||
t_force_log_merge
|
||||
t_wait_for_no_orphans
|
||||
check_ino_index "$ino" "$dseq" "$T_M0"
|
||||
check_ino_index "$ino" "$dseq" "$T_M1"
|
||||
|
||||
t_restore_all_sysfs_mount_options orphan_scan_delay_ms
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
t_require_commands mmap_stress mmap_validate scoutfs xfs_io
|
||||
|
||||
echo "== mmap_stress"
|
||||
mmap_stress 8192 2000 "$T_D0/mmap_stress" "$T_D1/mmap_stress" "$T_D2/mmap_stress" "$T_D3/mmap_stress" "$T_D4/mmap_stress" | sed 's/:.*//g' | sort
|
||||
mmap_stress 8192 30 "$T_D0/mmap_stress" "$T_D0/mmap_stress" "$T_D0/mmap_stress" "$T_D3/mmap_stress" "$T_D3/mmap_stress" | sed 's/:.*//g' | sort
|
||||
|
||||
echo "== basic mmap/read/write consistency checks"
|
||||
mmap_validate 256 1000 "$T_D0/mmap_val1" "$T_D1/mmap_val1"
|
||||
|
||||
@@ -62,7 +62,7 @@ test_timeout()
|
||||
sleep 1
|
||||
|
||||
# tear down the current server/leader
|
||||
t_force_umount $sv
|
||||
t_force_umount $sv &
|
||||
|
||||
# see how long it takes for the next leader to start
|
||||
start=$(time_ms)
|
||||
@@ -73,6 +73,7 @@ test_timeout()
|
||||
echo "to $to delay $delay" >> $T_TMP.delay
|
||||
|
||||
# restore the mount that we tore down
|
||||
wait
|
||||
t_mount $sv
|
||||
|
||||
# make sure the new leader delay was reasonable, allowing for some slack
|
||||
|
||||
@@ -8,19 +8,19 @@ t_require_mounts 2
|
||||
echo "=== renameat2 noreplace flag test"
|
||||
|
||||
# give each mount their own dir (lock group) to minimize create contention
|
||||
mkdir $T_M0/dir0
|
||||
mkdir $T_M1/dir1
|
||||
mkdir $T_D0/dir0
|
||||
mkdir $T_D1/dir1
|
||||
|
||||
echo "=== run two asynchronous calls to renameat2 NOREPLACE"
|
||||
for i in $(seq 0 100); do
|
||||
# prepare inputs in isolation
|
||||
touch "$T_M0/dir0/old0"
|
||||
touch "$T_M1/dir1/old1"
|
||||
touch "$T_D0/dir0/old0"
|
||||
touch "$T_D1/dir1/old1"
|
||||
|
||||
# race doing noreplace renames, both can't succeed
|
||||
dumb_renameat2 -n "$T_M0/dir0/old0" "$T_M0/dir0/sharednew" 2> /dev/null &
|
||||
dumb_renameat2 -n "$T_D0/dir0/old0" "$T_D0/dir0/sharednew" 2> /dev/null &
|
||||
pid0=$!
|
||||
dumb_renameat2 -n "$T_M1/dir1/old1" "$T_M1/dir0/sharednew" 2> /dev/null &
|
||||
dumb_renameat2 -n "$T_D1/dir1/old1" "$T_D1/dir0/sharednew" 2> /dev/null &
|
||||
pid1=$!
|
||||
|
||||
wait $pid0
|
||||
@@ -31,7 +31,7 @@ for i in $(seq 0 100); do
|
||||
test "$rc0" == 0 -a "$rc1" == 0 && t_fail "both renames succeeded"
|
||||
|
||||
# blow away possible files for either race outcome
|
||||
rm -f "$T_M0/dir0/old0" "$T_M1/dir1/old1" "$T_M0/dir0/sharednew" "$T_M1/dir1/sharednew"
|
||||
rm -f "$T_D0/dir0/old0" "$T_D1/dir1/old1" "$T_D0/dir0/sharednew" "$T_D1/dir1/sharednew"
|
||||
done
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -19,8 +19,8 @@ df_free() {
|
||||
}
|
||||
|
||||
same_totals() {
|
||||
cur_meta_tot=$(statfs_total meta "$SCR")
|
||||
cur_data_tot=$(statfs_total data "$SCR")
|
||||
cur_meta_tot=$(statfs_total meta "$T_MSCR")
|
||||
cur_data_tot=$(statfs_total data "$T_MSCR")
|
||||
|
||||
test "$cur_meta_tot" == "$exp_meta_tot" || \
|
||||
t_fail "cur total_meta_blocks $cur_meta_tot != expected $exp_meta_tot"
|
||||
@@ -34,10 +34,10 @@ same_totals() {
|
||||
# some slop to account for reserved blocks and concurrent allocation.
|
||||
#
|
||||
devices_grew() {
|
||||
cur_meta_tot=$(statfs_total meta "$SCR")
|
||||
cur_data_tot=$(statfs_total data "$SCR")
|
||||
cur_meta_df=$(df_free MetaData "$SCR")
|
||||
cur_data_df=$(df_free Data "$SCR")
|
||||
cur_meta_tot=$(statfs_total meta "$T_MSCR")
|
||||
cur_data_tot=$(statfs_total data "$T_MSCR")
|
||||
cur_meta_df=$(df_free MetaData "$T_MSCR")
|
||||
cur_data_df=$(df_free Data "$T_MSCR")
|
||||
|
||||
local grow_meta_tot=$(echo "$exp_meta_tot * 2" | bc)
|
||||
local grow_data_tot=$(echo "$exp_data_tot * 2" | bc)
|
||||
@@ -70,19 +70,13 @@ size_data=$(blockdev --getsize64 "$T_EX_DATA_DEV")
|
||||
quarter_meta=$(echo "$size_meta / 4" | bc)
|
||||
quarter_data=$(echo "$size_data / 4" | bc)
|
||||
|
||||
# XXX this is all pretty manual, would be nice to have helpers
|
||||
echo "== make initial small fs"
|
||||
scoutfs mkfs -A -f -Q 0,127.0.0.1,$T_SCRATCH_PORT -m $quarter_meta -d $quarter_data \
|
||||
"$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 || \
|
||||
t_fail "mkfs failed"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
mount -t scoutfs -o metadev_path=$T_EX_META_DEV,quorum_slot_nr=0 \
|
||||
"$T_EX_DATA_DEV" "$SCR"
|
||||
t_scratch_mkfs -A -m $quarter_meta -d $quarter_data
|
||||
t_scratch_mount
|
||||
|
||||
# then calculate sizes based on blocks that mkfs used
|
||||
quarter_meta=$(echo "$(statfs_total meta "$SCR") * 64 * 1024" | bc)
|
||||
quarter_data=$(echo "$(statfs_total data "$SCR") * 4 * 1024" | bc)
|
||||
quarter_meta=$(echo "$(statfs_total meta "$T_MSCR") * 64 * 1024" | bc)
|
||||
quarter_data=$(echo "$(statfs_total data "$T_MSCR") * 4 * 1024" | bc)
|
||||
whole_meta=$(echo "$quarter_meta * 4" | bc)
|
||||
whole_data=$(echo "$quarter_data * 4" | bc)
|
||||
outsize_meta=$(echo "$whole_meta * 2" | bc)
|
||||
@@ -93,59 +87,58 @@ shrink_meta=$(echo "$quarter_meta / 2" | bc)
|
||||
shrink_data=$(echo "$quarter_data / 2" | bc)
|
||||
|
||||
# and save expected values for checks
|
||||
exp_meta_tot=$(statfs_total meta "$SCR")
|
||||
exp_meta_df=$(df_free MetaData "$SCR")
|
||||
exp_data_tot=$(statfs_total data "$SCR")
|
||||
exp_data_df=$(df_free Data "$SCR")
|
||||
exp_meta_tot=$(statfs_total meta "$T_MSCR")
|
||||
exp_meta_df=$(df_free MetaData "$T_MSCR")
|
||||
exp_data_tot=$(statfs_total data "$T_MSCR")
|
||||
exp_data_df=$(df_free Data "$T_MSCR")
|
||||
|
||||
echo "== 0s do nothing"
|
||||
scoutfs resize-devices -p "$SCR"
|
||||
scoutfs resize-devices -p "$SCR" -m 0
|
||||
scoutfs resize-devices -p "$SCR" -d 0
|
||||
scoutfs resize-devices -p "$SCR" -m 0 -d 0
|
||||
scoutfs resize-devices -p "$T_MSCR"
|
||||
scoutfs resize-devices -p "$T_MSCR" -m 0
|
||||
scoutfs resize-devices -p "$T_MSCR" -d 0
|
||||
scoutfs resize-devices -p "$T_MSCR" -m 0 -d 0
|
||||
|
||||
echo "== shrinking fails"
|
||||
scoutfs resize-devices -p "$SCR" -m $shrink_meta
|
||||
scoutfs resize-devices -p "$SCR" -d $shrink_data
|
||||
scoutfs resize-devices -p "$SCR" -m $shrink_meta -d $shrink_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $shrink_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -d $shrink_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $shrink_meta -d $shrink_data
|
||||
same_totals
|
||||
|
||||
echo "== existing sizes do nothing"
|
||||
scoutfs resize-devices -p "$SCR" -m $quarter_meta
|
||||
scoutfs resize-devices -p "$SCR" -d $quarter_data
|
||||
scoutfs resize-devices -p "$SCR" -m $quarter_meta -d $quarter_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $quarter_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -d $quarter_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $quarter_meta -d $quarter_data
|
||||
same_totals
|
||||
|
||||
echo "== growing outside device fails"
|
||||
scoutfs resize-devices -p "$SCR" -m $outsize_meta
|
||||
scoutfs resize-devices -p "$SCR" -d $outsize_data
|
||||
scoutfs resize-devices -p "$SCR" -m $outsize_meta -d $outsize_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $outsize_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -d $outsize_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $outsize_meta -d $outsize_data
|
||||
same_totals
|
||||
|
||||
echo "== resizing meta works"
|
||||
scoutfs resize-devices -p "$SCR" -m $half_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $half_meta
|
||||
devices_grew meta
|
||||
|
||||
echo "== resizing data works"
|
||||
scoutfs resize-devices -p "$SCR" -d $half_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -d $half_data
|
||||
devices_grew data
|
||||
|
||||
echo "== shrinking back fails"
|
||||
scoutfs resize-devices -p "$SCR" -m $quarter_meta
|
||||
scoutfs resize-devices -p "$SCR" -m $quarter_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $quarter_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $quarter_data
|
||||
same_totals
|
||||
|
||||
echo "== resizing again does nothing"
|
||||
scoutfs resize-devices -p "$SCR" -m $half_meta
|
||||
scoutfs resize-devices -p "$SCR" -m $half_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $half_meta
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $half_data
|
||||
same_totals
|
||||
|
||||
echo "== resizing to full works"
|
||||
scoutfs resize-devices -p "$SCR" -m $whole_meta -d $whole_data
|
||||
scoutfs resize-devices -p "$T_MSCR" -m $whole_meta -d $whole_data
|
||||
devices_grew meta data
|
||||
|
||||
echo "== cleanup extra fs"
|
||||
umount "$SCR"
|
||||
rmdir "$SCR"
|
||||
t_scratch_umount
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -63,73 +63,47 @@ export MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
|
||||
export TEST_FS_MOUNT_OPTS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
|
||||
EOF
|
||||
|
||||
cat << EOF > local.exclude
|
||||
generic/003 # missing atime update in buffered read
|
||||
generic/075 # file content mismatch failures (fds, etc)
|
||||
generic/103 # enospc causes trans commit failures
|
||||
generic/108 # mount fails on failing device?
|
||||
generic/112 # file content mismatch failures (fds, etc)
|
||||
generic/213 # enospc causes trans commit failures
|
||||
generic/318 # can't support user namespaces until v5.11
|
||||
generic/321 # requires selinux enabled for '+' in ls?
|
||||
generic/338 # BUG_ON update inode error handling
|
||||
generic/347 # _dmthin_mount doesn't work?
|
||||
generic/356 # swap
|
||||
generic/357 # swap
|
||||
generic/409 # bind mounts not scripted yet
|
||||
generic/410 # bind mounts not scripted yet
|
||||
generic/411 # bind mounts not scripted yet
|
||||
generic/423 # symlink inode size is strlen() + 1 on scoutfs
|
||||
generic/430 # xfs_io copy_range missing in el7
|
||||
generic/431 # xfs_io copy_range missing in el7
|
||||
generic/432 # xfs_io copy_range missing in el7
|
||||
generic/433 # xfs_io copy_range missing in el7
|
||||
generic/434 # xfs_io copy_range missing in el7
|
||||
generic/441 # dm-mapper
|
||||
generic/444 # el9's posix_acl_update_mode is buggy ?
|
||||
generic/467 # open_by_handle ESTALE
|
||||
generic/472 # swap
|
||||
generic/484 # dm-mapper
|
||||
generic/493 # swap
|
||||
generic/494 # swap
|
||||
generic/495 # swap
|
||||
generic/496 # swap
|
||||
generic/497 # swap
|
||||
generic/532 # xfs_io statx attrib_mask missing in el7
|
||||
generic/554 # swap
|
||||
generic/563 # cgroup+loopdev
|
||||
generic/564 # xfs_io copy_range missing in el7
|
||||
generic/565 # xfs_io copy_range missing in el7
|
||||
generic/568 # falloc not resulting in block count increase
|
||||
generic/569 # swap
|
||||
generic/570 # swap
|
||||
generic/620 # dm-hugedisk
|
||||
generic/633 # id-mapped mounts missing in el7
|
||||
generic/636 # swap
|
||||
generic/641 # swap
|
||||
generic/643 # swap
|
||||
EOF
|
||||
cp "$T_EXTRA/local.exclude" local.exclude
|
||||
|
||||
t_restore_output
|
||||
t_stdout_invoked
|
||||
echo " (showing output of xfstests)"
|
||||
|
||||
args="-E local.exclude ${T_XFSTESTS_ARGS:--g quick}"
|
||||
./check $args
|
||||
# the fs is unmounted when check finishes
|
||||
|
||||
t_stdout_compare
|
||||
|
||||
#
|
||||
# ./check writes the results of the run to check.log. It lists
|
||||
# the tests it ran, skipped, or failed. Then it writes a line saying
|
||||
# everything passed or some failed. We scrape the most recent run and
|
||||
# use it as the output to compare to make sure that we run the right
|
||||
# tests and get the right results.
|
||||
# ./check writes the results of the run to check.log. It lists the
|
||||
# tests it ran, skipped, or failed. Then it writes a line saying
|
||||
# everything passed or some failed.
|
||||
#
|
||||
|
||||
#
|
||||
# If XFSTESTS_ARGS were specified then we just pass/fail to match the
|
||||
# check run.
|
||||
#
|
||||
if [ -n "$T_XFSTESTS_ARGS" ]; then
|
||||
if tail -1 results/check.log | grep -q "Failed"; then
|
||||
t_fail
|
||||
else
|
||||
t_pass
|
||||
fi
|
||||
fi
|
||||
|
||||
#
|
||||
# Otherwise, typically, when there were no args then we scrape the most
|
||||
# recent run and use it as the output to compare to make sure that we
|
||||
# run the right tests and get the right results.
|
||||
#
|
||||
awk '
|
||||
/^(Ran|Not run|Failures):.*/ {
|
||||
if (pf) {
|
||||
res=""
|
||||
pf=""
|
||||
} res = res "\n" $0
|
||||
}
|
||||
res = res "\n" $0
|
||||
}
|
||||
/^(Passed|Failed).*tests$/ {
|
||||
pf=$0
|
||||
@@ -139,10 +113,14 @@ awk '
|
||||
}' < results/check.log > "$T_TMPDIR/results"
|
||||
|
||||
# put a test per line so diff shows tests that differ
|
||||
egrep "^(Ran|Not run|Failures):" "$T_TMPDIR/results" | \
|
||||
fmt -w 1 > "$T_TMPDIR/results.fmt"
|
||||
egrep "^(Passed|Failed).*tests$" "$T_TMPDIR/results" >> "$T_TMPDIR/results.fmt"
|
||||
grep -E "^(Ran|Not run|Failures):" "$T_TMPDIR/results" | fmt -w 1 > "$T_TMPDIR/results.fmt"
|
||||
grep -E "^(Passed|Failed).*tests$" "$T_TMPDIR/results" >> "$T_TMPDIR/results.fmt"
|
||||
|
||||
t_compare_output cat "$T_TMPDIR/results.fmt"
|
||||
diff -u "$T_EXTRA/expected-results" "$T_TMPDIR/results.fmt" > "$T_TMPDIR/results.diff"
|
||||
if [ -s "$T_TMPDIR/results.diff" ]; then
|
||||
echo "tests that were skipped/run differed from expected:"
|
||||
cat "$T_TMPDIR/results.diff"
|
||||
t_fail
|
||||
fi
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -62,31 +62,27 @@ test -x "$SCOUTFS_FENCED_RUN" || \
|
||||
# files disappear.
|
||||
#
|
||||
|
||||
# generate failure messages to stderr while still echoing 0 for the caller
|
||||
careful_cat()
|
||||
# silence error messages
|
||||
quiet_cat()
|
||||
{
|
||||
local path="$@"
|
||||
|
||||
cat "$@" || echo 0
|
||||
cat "$@" 2>/dev/null
|
||||
}
|
||||
|
||||
while sleep $SCOUTFS_FENCED_DELAY; do
|
||||
shopt -s nullglob
|
||||
for fence in /sys/fs/scoutfs/*/fence/*; do
|
||||
# catches unmatched regex when no dirs
|
||||
if [ ! -d "$fence" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# skip requests that have been handled
|
||||
if [ "$(careful_cat $fence/fenced)" == 1 -o \
|
||||
"$(careful_cat $fence/error)" == 1 ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
srv=$(basename $(dirname $(dirname $fence)))
|
||||
rid="$(cat $fence/rid)"
|
||||
ip="$(cat $fence/ipv4_addr)"
|
||||
reason="$(cat $fence/reason)"
|
||||
fenced="$(quiet_cat $fence/fenced)"
|
||||
error="$(quiet_cat $fence/error)"
|
||||
rid="$(quiet_cat $fence/rid)"
|
||||
ip="$(quiet_cat $fence/ipv4_addr)"
|
||||
reason="$(quiet_cat $fence/reason)"
|
||||
|
||||
# request dirs can linger then disappear after fenced/error is set
|
||||
if [ ! -d "$fence" -o "$fenced" == "1" -o "$error" == "1" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
log_message "server $srv fencing rid $rid at IP $ip for $reason"
|
||||
|
||||
|
||||
@@ -55,6 +55,30 @@ with initial sparse regions (perhaps by multiple threads writing to
|
||||
different regions) and wasted space isn't an issue (perhaps because the
|
||||
file population contains few small files).
|
||||
.TP
|
||||
.B ino_alloc_per_lock=<number>
|
||||
This option determines how many inode numbers are allocated in the same
|
||||
cluster lock. The default, and maximum, is 1024. The minimum is 1.
|
||||
Allocating fewer inodes per lock can allow more parallelism between
|
||||
mounts because there are more locks that cover the same number of
|
||||
created files. This can be helpful when working with smaller numbers of
|
||||
large files.
|
||||
.TP
|
||||
.B lock_idle_count=<number>
|
||||
This option sets the number of locks that the client will allow to
|
||||
remain idle after being granted. If the number of locks exceeds this
|
||||
count then the client will try to free the oldest locks. This setting
|
||||
is per-mount and only changes the behavior of that mount.
|
||||
.sp
|
||||
Idle locks are not reclaimed by memory pressure so this option
|
||||
determines the limit of how much memory is likely to be pinned by
|
||||
allocated idle locks. Setting this too low can increase latency of
|
||||
operations as repeated use of a working set of locks has to request the
|
||||
locks from the network rather than using granted idle locks.
|
||||
.sp
|
||||
The count is not strictly enforced. Operations are allowed to use locks
|
||||
while over the limit to avoid deadlocks under heavy concurrent load.
|
||||
Exceeding the count only attempts freeing of idle locks.
|
||||
.TP
|
||||
.B log_merge_wait_timeout_ms=<number>
|
||||
This option sets the amount of time, in milliseconds, that log merge
|
||||
creation can wait before timing out. This setting is per-mount, only
|
||||
|
||||
@@ -160,15 +160,16 @@ int parse_timespec(char *str, struct timespec *ts)
|
||||
* Parse a quorum slot specification string "NR,ADDR,PORT" into its
|
||||
* component parts. We use sscanf to both parse the leading NR and
|
||||
* trailing PORT integers, and to pull out the inner ADDR string which
|
||||
* is then parsed to make sure that it's a valid unicast ipv4 address.
|
||||
* is then parsed to make sure that it's a valid unicast ip address.
|
||||
* We require that all components be specified, and sccanf will check
|
||||
* this by the number of matches it returns.
|
||||
*/
|
||||
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
|
||||
{
|
||||
#define ADDR_CHARS 45 /* max ipv6 */
|
||||
char addr[ADDR_CHARS + 1] = {'\0',};
|
||||
#define ADDR_CHARS 45 /* (INET6_ADDRSTRLEN - 1) */
|
||||
char addr[INET6_ADDRSTRLEN] = {'\0',};
|
||||
struct in_addr in;
|
||||
struct in6_addr in6;
|
||||
int port;
|
||||
int parsed;
|
||||
int nr;
|
||||
@@ -206,15 +207,25 @@ int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (inet_aton(addr, &in) == 0 || htonl(in.s_addr) == 0 ||
|
||||
htonl(in.s_addr) == UINT_MAX) {
|
||||
printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
|
||||
addr, arg);
|
||||
return -EINVAL;
|
||||
if (inet_pton(AF_INET, addr, &in) == 1) {
|
||||
if (htonl(in.s_addr) == 0 || htonl(in.s_addr) == UINT_MAX) {
|
||||
printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
|
||||
addr, arg);
|
||||
return -EINVAL;
|
||||
}
|
||||
slot->addr.v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
slot->addr.v4.addr = cpu_to_le32(htonl(in.s_addr));
|
||||
slot->addr.v4.port = cpu_to_le16(port);
|
||||
} else if (inet_pton(AF_INET6, addr, &in6) == 1) {
|
||||
if (IN6_IS_ADDR_UNSPECIFIED(&in6) || IN6_IS_ADDR_MULTICAST(&in6)) {
|
||||
printf("invalid ipv6 address '%s' in quorum slot '%s'\n",
|
||||
addr, arg);
|
||||
return -EINVAL;
|
||||
}
|
||||
slot->addr.v6.family = cpu_to_le16(SCOUTFS_AF_IPV6);
|
||||
memcpy(slot->addr.v6.addr, &in6, 16);
|
||||
slot->addr.v6.port = cpu_to_le16(port);
|
||||
}
|
||||
|
||||
slot->addr.v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
slot->addr.v4.addr = cpu_to_le32(htonl(in.s_addr));
|
||||
slot->addr.v4.port = cpu_to_le16(port);
|
||||
return nr;
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
#include "srch.h"
|
||||
#include "leaf_item_hash.h"
|
||||
#include "dev.h"
|
||||
#include "quorum.h"
|
||||
|
||||
static void print_block_header(struct scoutfs_block_header *hdr, int size)
|
||||
{
|
||||
@@ -400,12 +401,20 @@ static int print_mounted_client_entry(struct scoutfs_key *key, u64 seq, u8 flags
|
||||
{
|
||||
struct scoutfs_mounted_client_btree_val *mcv = val;
|
||||
struct in_addr in;
|
||||
char ip6addr[INET6_ADDRSTRLEN];
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
in.s_addr = htonl(le32_to_cpu(mcv->addr.v4.addr));
|
||||
if (mcv->addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
in.s_addr = htonl(le32_to_cpu(mcv->addr.v4.addr));
|
||||
|
||||
printf(" rid %016llx ipv4_addr %s flags 0x%x\n",
|
||||
le64_to_cpu(key->skmc_rid), inet_ntoa(in), mcv->flags);
|
||||
printf(" rid %016llx ipv4_addr %s flags 0x%x\n",
|
||||
le64_to_cpu(key->skmc_rid), inet_ntoa(in), mcv->flags);
|
||||
} else if (mcv->addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
|
||||
printf(" rid %016llx ipv6_addr %s flags 0x%x\n",
|
||||
le64_to_cpu(key->skmc_rid),
|
||||
inet_ntop(AF_INET, mcv->addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
|
||||
mcv->flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -891,26 +900,40 @@ static int print_btree_leaf_items(int fd, struct scoutfs_super_block *super,
|
||||
static char *alloc_addr_str(union scoutfs_inet_addr *ia)
|
||||
{
|
||||
struct in_addr addr;
|
||||
char ip6addr[INET6_ADDRSTRLEN];
|
||||
char *quad;
|
||||
char *str;
|
||||
int len;
|
||||
|
||||
memset(&addr, 0, sizeof(addr));
|
||||
addr.s_addr = htonl(le32_to_cpu(ia->v4.addr));
|
||||
quad = inet_ntoa(addr);
|
||||
if (quad == NULL)
|
||||
return NULL;
|
||||
if (le16_to_cpu(ia->v4.family) == SCOUTFS_AF_IPV4) {
|
||||
memset(&addr, 0, sizeof(addr));
|
||||
addr.s_addr = htonl(le32_to_cpu(ia->v4.addr));
|
||||
quad = inet_ntoa(addr);
|
||||
if (quad == NULL)
|
||||
return NULL;
|
||||
|
||||
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->v4.port));
|
||||
if (len < 1 || len > 22)
|
||||
return NULL;
|
||||
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->v4.port));
|
||||
if (len < 1 || len > 22)
|
||||
return NULL;
|
||||
|
||||
len++; /* null */
|
||||
str = malloc(len);
|
||||
if (!str)
|
||||
return NULL;
|
||||
len++; /* null */
|
||||
str = malloc(len);
|
||||
if (!str)
|
||||
return NULL;
|
||||
|
||||
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->v4.port));
|
||||
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->v4.port));
|
||||
} else if (le16_to_cpu(ia->v6.family) == SCOUTFS_AF_IPV6) {
|
||||
if (inet_ntop(AF_INET6, ia->v6.addr, ip6addr, INET6_ADDRSTRLEN) == NULL)
|
||||
return NULL;
|
||||
|
||||
len = strlen(ip6addr) + 9; /* "[]:\0" (4) plus max strlen(u16) (5) */
|
||||
str = malloc(len);
|
||||
if (!str)
|
||||
return NULL;
|
||||
|
||||
snprintf(str, len, "[%s]:%u", ip6addr, le16_to_cpu(ia->v6.port));
|
||||
} else
|
||||
return NULL;
|
||||
return str;
|
||||
}
|
||||
|
||||
@@ -1026,7 +1049,7 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
|
||||
printf(" quorum config version %llu\n",
|
||||
le64_to_cpu(super->qconf.version));
|
||||
for (i = 0; i < array_size(super->qconf.slots); i++) {
|
||||
if (super->qconf.slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
|
||||
if (!quorum_slot_present(super, i))
|
||||
continue;
|
||||
|
||||
addr = alloc_addr_str(&super->qconf.slots[i].addr);
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
|
||||
bool quorum_slot_present(struct scoutfs_super_block *super, int i)
|
||||
{
|
||||
return super->qconf.slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
return ((super->qconf.slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) ||
|
||||
(super->qconf.slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)));
|
||||
}
|
||||
|
||||
bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
|
||||
@@ -18,35 +19,40 @@ bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
|
||||
struct in_addr in;
|
||||
bool valid = true;
|
||||
char *addr;
|
||||
char ip6addr[INET6_ADDRSTRLEN];
|
||||
int i;
|
||||
int j;
|
||||
|
||||
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
|
||||
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_NONE))
|
||||
continue;
|
||||
|
||||
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
|
||||
slots[i].addr.v4.port == slots[j].addr.v4.port) {
|
||||
in.s_addr =
|
||||
htonl(le32_to_cpu(slots[i].addr.v4.addr));
|
||||
addr = inet_ntoa(in);
|
||||
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
|
||||
i, j, addr,
|
||||
le16_to_cpu(slots[i].addr.v4.port));
|
||||
valid = false;
|
||||
}
|
||||
}
|
||||
} else if (slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if ((IN6_ARE_ADDR_EQUAL(slots[i].addr.v6.addr, slots[j].addr.v6.addr)) &&
|
||||
(slots[i].addr.v6.port == slots[j].addr.v6.port)) {
|
||||
fprintf(stderr, "quorum slot nr %u and %u have the same address [%s]:%u\n",
|
||||
i, j,
|
||||
inet_ntop(AF_INET6, slots[i].addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
|
||||
le16_to_cpu(slots[i].addr.v6.port));
|
||||
valid = false;
|
||||
}
|
||||
}
|
||||
} else if (slots[i].addr.v6.family != cpu_to_le16(SCOUTFS_AF_NONE)) {
|
||||
fprintf(stderr, "quorum slot nr %u has invalid family %u\n",
|
||||
i, le16_to_cpu(slots[i].addr.v4.family));
|
||||
valid = false;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
|
||||
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
|
||||
continue;
|
||||
|
||||
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
|
||||
slots[i].addr.v4.port == slots[j].addr.v4.port) {
|
||||
|
||||
in.s_addr =
|
||||
htonl(le32_to_cpu(slots[i].addr.v4.addr));
|
||||
addr = inet_ntoa(in);
|
||||
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
|
||||
i, j, addr,
|
||||
le16_to_cpu(slots[i].addr.v4.port));
|
||||
valid = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return valid;
|
||||
@@ -61,19 +67,23 @@ void print_quorum_slots(struct scoutfs_quorum_slot *slots, int nr, char *indent)
|
||||
{
|
||||
struct scoutfs_quorum_slot *sl;
|
||||
struct in_addr in;
|
||||
char ip6addr[INET6_ADDRSTRLEN];
|
||||
bool first = true;
|
||||
int i;
|
||||
|
||||
for (i = 0, sl = slots; i < SCOUTFS_QUORUM_MAX_SLOTS; i++, sl++) {
|
||||
if (sl->addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
|
||||
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
|
||||
printf("%s%u: %s:%u\n", first ? "" : indent,
|
||||
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
|
||||
|
||||
if (sl->addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
|
||||
continue;
|
||||
|
||||
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
|
||||
printf("%s%u: %s:%u\n", first ? "" : indent,
|
||||
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
|
||||
|
||||
first = false;
|
||||
first = false;
|
||||
} else if (sl->addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
|
||||
printf("%s%u: [%s]:%u\n", first ? "" : indent, i,
|
||||
inet_ntop(AF_INET6, sl->addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
|
||||
le16_to_cpu(sl->addr.v6.port));
|
||||
first = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user