Compare commits

..

5 Commits

Author SHA1 Message Date
Auke Kok
9fe5e895a4 Account for ipv6 in kernel_get{sock,peer}name compat.
These 2 kernel wrapper functions need to be able to properly handle
the ipv6 addrlen, instead of returning -EAFNOSUPPORT(-97).

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-02-02 15:51:06 -05:00
Auke Kok
132d73d435 Add IPv6 support to the kernel module.
This adds IPv6 support to the kernel module side.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-02-02 14:32:32 -05:00
Auke Kok
d2bb5c6cba Enable ipv6 in testing.
Instead of using 127.0.0.1, we initialize the quorum slots to ::1,
enabling all ipv6 support.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-02-02 14:32:32 -05:00
Auke Kok
1031e71b19 Add ipv6 support to scoutfs userspace utility.
This change adds ipv6 support to various scoutfs sub-commands, allowing
users to mkfs, print and change-quorum-config using ipv6 addresses, and
modifies the outputs.

Any ipv6 address/port is displayed as [::1]:5000 to comply with the
related RFCs. Input strings remain consistent, as the quorum config
input value is already comma-separated, not posing any issues.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-02-02 14:32:32 -05:00
Auke Kok
90bd7f9f43 Don't stack alloc struct scoutfs_quorum_block_event old
The size of this thing is well over 1kb, and on several supported
distributions the compiler will error because this particular
function exceeds a 2k stack frame size, which is excessive,
even for a function that isn't called regularly.

We can allocate the whole thing in one go by smartly allocating it
as an array of (an array of structs), which still allows us to index
it as a 2d array as before, taking away some of the additional
complexity.

Signed-off-by: Auke Kok <auke.kok@versity.com>
2026-02-02 14:32:32 -05:00
46 changed files with 430 additions and 1791 deletions

View File

@@ -1,33 +1,6 @@
Versity ScoutFS Release Notes
=============================
---
v1.29
\
*Mar 25, 2026*
Add a repair mechanism for mount logs that weren't properly resolved as
mounts left the cluster. The presence of these logs prevents log
merging from making forward progress and the backlog of logs over time
can cause operations to slow to a crawl. With the repair mechanism in
place the orphaned logs don't stop merging and operations proceed as
usual.
Add an ioctl for turning offline unmapped file regions into sparse
regions.
---
v1.28
\
*Feb 5, 2026*
Fix a bug that led to incorrect negative caching of ACL entries
starting in version 9.6 of distribution kernels in the enterprise linux
family. This would manifest as ACLs seemingly disappearing,
particularly default ACLs on directories. The persistent ACLs always
existed but because of internal API incompatibility some readers
couldn't see them and would cache that they didn't exist.
---
v1.27
\

View File

@@ -2183,8 +2183,6 @@ static int merge_read_item(struct super_block *sb, struct scoutfs_key *key, u64
if (ret > 0) {
if (ret == SCOUTFS_DELTA_COMBINED) {
scoutfs_inc_counter(sb, btree_merge_delta_combined);
if (seq > found->seq)
found->seq = seq;
} else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
scoutfs_inc_counter(sb, btree_merge_delta_null);
free_mitem(rng, found);
@@ -2488,14 +2486,6 @@ int scoutfs_btree_merge(struct super_block *sb,
mitem = next_mitem(mitem);
free_mitem(&rng, tmp);
}
if (mitem && walk_val_len == 0 &&
!(walk_flags & (BTW_INSERT | BTW_DELETE)) &&
scoutfs_trigger(sb, LOG_MERGE_FORCE_PARTIAL)) {
ret = -ERANGE;
*next_ret = mitem->key;
goto out;
}
}
ret = 0;

View File

@@ -479,7 +479,7 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_mount_options opts;
struct scoutfs_net_greeting greet;
struct sockaddr_in sin;
struct sockaddr_storage sin;
bool am_quorum;
int ret;

View File

@@ -79,10 +79,8 @@ static void item_from_extent(struct scoutfs_key *key,
.skdx_end = cpu_to_le64(start + len - 1),
.skdx_len = cpu_to_le64(len),
};
*dv = (struct scoutfs_data_extent_val) {
.blkno = cpu_to_le64(map),
.flags = flags,
};
dv->blkno = cpu_to_le64(map);
dv->flags = flags;
}
static void ext_from_item(struct scoutfs_extent *ext,
@@ -1517,101 +1515,6 @@ out:
return ret;
}
/*
* Punch holes in offline extents. This is a very specific tool that
* only does one job: it converts extents from offline to sparse. It
* returns an error if it encounters an extent that isn't offline or has
* a block mapping. It ignores i_size completely; it does not test it,
* and does not update it.
*
* The caller has the inode locked in the vfs and performed basic sanity
* checks. We manage transactions and the extent_sem which is ordered
* inside the transaction.
*/
int scoutfs_data_punch_offline(struct inode *inode, u64 iblock, u64 last, u64 data_version,
struct scoutfs_lock *lock)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
struct super_block *sb = inode->i_sb;
struct data_ext_args args = {
.ino = scoutfs_ino(inode),
.inode = inode,
.lock = lock,
};
struct scoutfs_extent ext;
LIST_HEAD(ind_locks);
int ret;
int i;
if (WARN_ON_ONCE(iblock > last)) {
ret = -EINVAL;
goto out;
}
/* idiomatic to call start,last with 0,~0, clamp last to last possible */
last = min(last, SCOUTFS_BLOCK_SM_MAX);
ret = 0;
while (iblock <= last) {
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true, false) ?:
scoutfs_dirty_inode_item(inode, lock);
if (ret < 0)
break;
down_write(&si->extent_sem);
for (i = 0; i < 32 && (iblock <= last); i++) {
ret = scoutfs_ext_next(sb, &data_ext_ops, &args, iblock, 1, &ext);
if (ret == -ENOENT) {
iblock = last + 1;
ret = 0;
break;
}
if (ret < 0)
break;
if (ext.start > last) {
iblock = last + 1;
break;
}
if (ext.map) {
ret = -EINVAL;
break;
}
if (ext.flags & SEF_OFFLINE) {
if (iblock > ext.start) {
ext.len -= iblock - ext.start;
ext.start = iblock;
}
ext.len = min(ext.len, last - ext.start + 1);
ext.flags &= ~SEF_OFFLINE;
ret = scoutfs_ext_set(sb, &data_ext_ops, &args,
ext.start, ext.len, ext.map, ext.flags);
if (ret < 0)
break;
}
iblock = ext.start + ext.len;
}
up_write(&si->extent_sem);
scoutfs_update_inode_item(inode, lock, &ind_locks);
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
if (ret < 0)
break;
}
out:
return ret;
}
/*
* This copies to userspace :/
*/

View File

@@ -57,8 +57,6 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
u64 data_version);
int scoutfs_data_punch_offline(struct inode *inode, u64 iblock, u64 last, u64 data_version,
struct scoutfs_lock *lock);
int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
u8 sef, u8 op, struct scoutfs_data_wait *ow,

View File

@@ -587,12 +587,10 @@ static int add_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
}
/* initialize the dent */
*dent = (struct scoutfs_dirent) {
.ino = cpu_to_le64(ino),
.hash = cpu_to_le64(hash),
.pos = cpu_to_le64(pos),
.type = mode_to_type(mode),
};
dent->ino = cpu_to_le64(ino);
dent->hash = cpu_to_le64(hash);
dent->pos = cpu_to_le64(pos);
dent->type = mode_to_type(mode);
memcpy(dent->name, name, name_len);
init_dirent_key(&ent_key, SCOUTFS_DIRENT_TYPE, dir_ino, hash, pos);

View File

@@ -25,6 +25,7 @@
#include "sysfs.h"
#include "server.h"
#include "fence.h"
#include "net.h"
/*
* Fencing ensures that a given mount can no longer write to the
@@ -79,7 +80,7 @@ struct pending_fence {
struct timer_list timer;
ktime_t start_kt;
__be32 ipv4_addr;
union scoutfs_inet_addr addr;
bool fenced;
bool error;
int reason;
@@ -171,14 +172,19 @@ static ssize_t error_store(struct kobject *kobj, struct kobj_attribute *attr, co
}
SCOUTFS_ATTR_RW(error);
static ssize_t ipv4_addr_show(struct kobject *kobj,
static ssize_t inet_addr_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
struct sockaddr_storage sin;
return snprintf(buf, PAGE_SIZE, "%pI4", &fence->ipv4_addr);
memset(&sin, 0, sizeof(struct sockaddr_storage));
scoutfs_addr_to_sin(&sin, &fence->addr);
return snprintf(buf, PAGE_SIZE, "%pISc", SIN_ARG(&sin));
}
SCOUTFS_ATTR_RO(ipv4_addr);
SCOUTFS_ATTR_RO(inet_addr);
static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -212,7 +218,7 @@ static struct attribute *fence_attrs[] = {
SCOUTFS_ATTR_PTR(elapsed_secs),
SCOUTFS_ATTR_PTR(fenced),
SCOUTFS_ATTR_PTR(error),
SCOUTFS_ATTR_PTR(ipv4_addr),
SCOUTFS_ATTR_PTR(inet_addr),
SCOUTFS_ATTR_PTR(reason),
SCOUTFS_ATTR_PTR(rid),
NULL,
@@ -232,7 +238,7 @@ static void fence_timeout(struct timer_list *timer)
wake_up(&fi->waitq);
}
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason)
int scoutfs_fence_start(struct super_block *sb, u64 rid, union scoutfs_inet_addr *addr, int reason)
{
DECLARE_FENCE_INFO(sb, fi);
struct pending_fence *fence;
@@ -248,7 +254,7 @@ int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int r
scoutfs_sysfs_init_attrs(sb, &fence->ssa);
fence->start_kt = ktime_get();
fence->ipv4_addr = ipv4_addr;
memcpy(&fence->addr, addr, sizeof(union scoutfs_inet_addr));
fence->fenced = false;
fence->error = false;
fence->reason = reason;

View File

@@ -7,7 +7,7 @@ enum {
SCOUTFS_FENCE_QUORUM_BLOCK_LEADER,
};
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason);
int scoutfs_fence_start(struct super_block *sb, u64 rid, union scoutfs_inet_addr *addr, int reason);
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error);
int scoutfs_fence_reason_pending(struct super_block *sb, int reason);
int scoutfs_fence_free(struct super_block *sb, u64 rid);

View File

@@ -239,9 +239,9 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u6
* to reset their state and retry with a newer version of the btrees.
*/
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
u64 merge_input_seq, struct scoutfs_key *key,
struct scoutfs_key *bloom_key, struct scoutfs_key *start,
struct scoutfs_key *end, scoutfs_forest_item_cb cb, void *arg)
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
struct scoutfs_key *start, struct scoutfs_key *end,
scoutfs_forest_item_cb cb, void *arg)
{
struct forest_read_items_data rid = {
.cb = cb,
@@ -317,17 +317,15 @@ int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_r
scoutfs_inc_counter(sb, forest_bloom_pass);
if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED) &&
(merge_input_seq == 0 ||
le64_to_cpu(lt.finalize_seq) < merge_input_seq))
rid.fic |= FIC_MERGE_INPUT;
if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED))
rid.fic |= FIC_FINALIZED;
ret = scoutfs_btree_read_items(sb, &lt.item_root, key, start,
end, forest_read_items, &rid);
if (ret < 0)
goto out;
rid.fic &= ~FIC_MERGE_INPUT;
rid.fic &= ~FIC_FINALIZED;
}
ret = 0;
@@ -347,7 +345,7 @@ int scoutfs_forest_read_items(struct super_block *sb,
ret = scoutfs_client_get_roots(sb, &roots);
if (ret == 0)
ret = scoutfs_forest_read_items_roots(sb, &roots, 0, key, bloom_key, start, end,
ret = scoutfs_forest_read_items_roots(sb, &roots, key, bloom_key, start, end,
cb, arg);
return ret;
}
@@ -795,7 +793,7 @@ out:
if (ret)
scoutfs_forest_destroy(sb);
return ret;
return 0;
}
void scoutfs_forest_start(struct super_block *sb)

View File

@@ -11,7 +11,7 @@ struct scoutfs_lock;
/* caller gives an item to the callback */
enum {
FIC_FS_ROOT = (1 << 0),
FIC_MERGE_INPUT = (1 << 1),
FIC_FINALIZED = (1 << 1),
};
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb, struct scoutfs_key *key, u64 seq,
u8 flags, void *val, int val_len, int fic, void *arg);
@@ -25,9 +25,9 @@ int scoutfs_forest_read_items(struct super_block *sb,
struct scoutfs_key *end,
scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
u64 merge_input_seq, struct scoutfs_key *key,
struct scoutfs_key *bloom_key, struct scoutfs_key *start,
struct scoutfs_key *end, scoutfs_forest_item_cb cb, void *arg);
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
struct scoutfs_key *start, struct scoutfs_key *end,
scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
struct scoutfs_lock *lock);
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);

View File

@@ -415,6 +415,8 @@ static long scoutfs_ioc_data_wait_err(struct file *file, unsigned long arg)
return 0;
if ((args.op & SCOUTFS_IOC_DWO_UNKNOWN) || !IS_ERR_VALUE(args.err))
return -EINVAL;
if ((args.op & SCOUTFS_IOC_DWO_UNKNOWN) || !IS_ERR_VALUE(args.err))
return -EINVAL;
trace_scoutfs_ioc_data_wait_err(sb, &args);
@@ -1667,78 +1669,6 @@ out:
return ret;
}
static long scoutfs_ioc_punch_offline(struct file *file, unsigned long arg)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct scoutfs_ioctl_punch_offline __user *upo = (void __user *)arg;
struct scoutfs_ioctl_punch_offline po;
struct scoutfs_lock *lock = NULL;
u64 iblock;
u64 last;
u64 tmp;
int ret;
if (copy_from_user(&po, upo, sizeof(po)))
return -EFAULT;
if (po.len == 0)
return 0;
if (check_add_overflow(po.offset, po.len - 1, &tmp) ||
(po.offset & SCOUTFS_BLOCK_SM_MASK) ||
(po.len & SCOUTFS_BLOCK_SM_MASK))
return -EOVERFLOW;
if (po.flags)
return -EINVAL;
ret = mnt_want_write_file(file);
if (ret < 0)
return ret;
inode_lock(inode);
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
if (ret)
goto out;
if (!S_ISREG(inode->i_mode)) {
ret = -EINVAL;
goto out;
}
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EINVAL;
goto out;
}
ret = inode_permission(KC_VFS_INIT_NS inode, MAY_WRITE);
if (ret < 0)
goto out;
if (scoutfs_inode_data_version(inode) != po.data_version) {
ret = -ESTALE;
goto out;
}
if ((ret = scoutfs_inode_check_retention(inode)))
goto out;
iblock = po.offset >> SCOUTFS_BLOCK_SM_SHIFT;
last = (po.offset + po.len - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
ret = scoutfs_data_punch_offline(inode, iblock, last, po.data_version, lock);
out:
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
inode_unlock(inode);
mnt_drop_write_file(file);
return ret;
}
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -1788,8 +1718,6 @@ long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return scoutfs_ioc_mod_quota_rule(file, arg, false);
case SCOUTFS_IOC_READ_XATTR_INDEX:
return scoutfs_ioc_read_xattr_index(file, arg);
case SCOUTFS_IOC_PUNCH_OFFLINE:
return scoutfs_ioc_punch_offline(file, arg);
}
return -ENOTTY;

View File

@@ -848,32 +848,4 @@ struct scoutfs_ioctl_read_xattr_index {
#define SCOUTFS_IOC_READ_XATTR_INDEX \
_IOR(SCOUTFS_IOCTL_MAGIC, 23, struct scoutfs_ioctl_read_xattr_index)
/*
* This is a limited and specific version of hole punching. It's an
* archive layer operation that only converts unmapped offline extents
* into sparse extents. It is intended to be used when restoring sparse
* files after the initial creation set the entire file size offline.
*
* The offset and len fields are in units of bytes and must be aligned
* to the small (4KiB) block size. All regions of offline extents
* covered by the region will be converted into sparse online extents,
* including regions that straddle the boundaries of the region. Any
* existing sparse extents in the region are ignored.
*
* The data_version must match the inode or EINVAL is returned. The
* data_version is not modified by this operation.
*
* EINVAL is returned if any mapped extents are found in the region. If
* an error is returned then partial progress may have been made.
*/
struct scoutfs_ioctl_punch_offline {
__u64 offset;
__u64 len;
__u64 data_version;
__u64 flags;
};
#define SCOUTFS_IOC_PUNCH_OFFLINE \
_IOW(SCOUTFS_IOCTL_MAGIC, 24, struct scoutfs_ioctl_punch_offline)
#endif

View File

@@ -195,9 +195,11 @@ struct kc_shrinker_wrapper {
#include <linux/inet.h>
static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *addr)
{
int addrlen = sizeof(struct sockaddr_in);
int addrlen = sizeof(struct sockaddr_storage);
int ret = kernel_getsockname(sock, addr, &addrlen);
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
if (ret == 0 && (!(
(addrlen == sizeof(struct sockaddr_in)) ||
(addrlen == sizeof(struct sockaddr_in6)))))
return -EAFNOSUPPORT;
else if (ret < 0)
return ret;
@@ -206,9 +208,11 @@ static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *ad
}
static inline int kc_kernel_getpeername(struct socket *sock, struct sockaddr *addr)
{
int addrlen = sizeof(struct sockaddr_in);
int addrlen = sizeof(struct sockaddr_storage);
int ret = kernel_getpeername(sock, addr, &addrlen);
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
if (ret == 0 && (!(
(addrlen == sizeof(struct sockaddr_in)) ||
(addrlen == sizeof(struct sockaddr_in6)))))
return -EAFNOSUPPORT;
else if (ret < 0)
return ret;

View File

@@ -813,7 +813,6 @@ int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
out:
if (!lock) {
kfree(ireq);
ret = scoutfs_client_lock_response(sb, net_id, nl);
BUG_ON(ret); /* lock server doesn't fence timed out client requests */
}

View File

@@ -336,7 +336,7 @@ static inline u8 net_err_from_host(struct super_block *sb, int error)
error);
}
return SCOUTFS_NET_ERR_EINVAL;
return -EINVAL;
}
return net_errs[ind];
@@ -525,7 +525,7 @@ static int process_response(struct scoutfs_net_connection *conn,
struct super_block *sb = conn->sb;
struct message_send *msend;
scoutfs_net_response_t resp_func = NULL;
void *resp_data = NULL;
void *resp_data;
spin_lock(&conn->lock);
@@ -804,7 +804,7 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
if (invalid_message(conn, nh)) {
scoutfs_inc_counter(sb, net_recv_invalid_message);
ret = -EBADMSG;
goto out;
break;
}
data_len = le16_to_cpu(nh->data_len);
@@ -1218,7 +1218,8 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
trace_scoutfs_net_connect_work_enter(sb, 0, 0);
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
ret = kc_sock_create_kern(conn->connect_sin.ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret)
goto out;
@@ -1239,7 +1240,9 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
trace_scoutfs_conn_connect_start(conn);
ret = kernel_connect(sock, (struct sockaddr *)&conn->connect_sin,
sizeof(struct sockaddr_in), 0);
conn->connect_sin.ss_family == AF_INET ?
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6),
0);
if (ret)
goto out;
@@ -1281,6 +1284,13 @@ static bool empty_accepted_list(struct scoutfs_net_connection *conn)
return empty;
}
/*
* sockaddr_storage wraps both _in and _in6, which have _port always
* __be16 at the same offset, and we only need to test whether it's
* zero.
*/
#define sockaddr_port_is_nonzero(sin) ((sin).__data[0] || (sin).__data[1])
/*
* Safely shut down an active connection. This can be triggered by
* errors in workers or by an external call to free the connection. The
@@ -1304,7 +1314,7 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
trace_scoutfs_conn_shutdown_start(conn);
/* connected and accepted conns print a message */
if (conn->peername.sin_port != 0)
if (sockaddr_port_is_nonzero(conn->peername))
scoutfs_info(sb, "%s "SIN_FMT" -> "SIN_FMT,
conn->listening_conn ? "server closing" :
"client disconnected",
@@ -1434,6 +1444,7 @@ static void scoutfs_net_reconn_free_worker(struct work_struct *work)
DEFINE_CONN_FROM_WORK(conn, work, reconn_free_dwork.work);
struct super_block *sb = conn->sb;
struct scoutfs_net_connection *acc;
union scoutfs_inet_addr addr;
unsigned long now = jiffies;
unsigned long deadline = 0;
bool requeue = false;
@@ -1454,8 +1465,9 @@ restart:
if (!test_conn_fl(conn, shutting_down)) {
scoutfs_info(sb, "client "SIN_FMT" reconnect timed out, fencing",
SIN_ARG(&acc->last_peername));
scoutfs_sin_to_addr(&addr, &acc->last_peername);
ret = scoutfs_fence_start(sb, acc->rid,
acc->last_peername.sin_addr.s_addr,
&addr,
SCOUTFS_FENCE_CLIENT_RECONNECT);
if (ret) {
scoutfs_err(sb, "client fence returned err %d, shutting down server",
@@ -1538,9 +1550,9 @@ scoutfs_net_alloc_conn(struct super_block *sb,
conn->req_funcs = req_funcs;
spin_lock_init(&conn->lock);
init_waitqueue_head(&conn->waitq);
conn->sockname.sin_family = AF_INET;
conn->peername.sin_family = AF_INET;
conn->last_peername.sin_family = AF_INET;
conn->sockname.ss_family = AF_UNSPEC;
conn->peername.ss_family = AF_UNSPEC;
conn->last_peername.ss_family = AF_UNSPEC;
INIT_LIST_HEAD(&conn->accepted_head);
INIT_LIST_HEAD(&conn->accepted_list);
conn->next_send_seq = 1;
@@ -1619,7 +1631,7 @@ void scoutfs_net_free_conn(struct super_block *sb,
*/
int scoutfs_net_bind(struct super_block *sb,
struct scoutfs_net_connection *conn,
struct sockaddr_in *sin)
struct sockaddr_storage *sin)
{
struct socket *sock = NULL;
int addrlen;
@@ -1630,7 +1642,7 @@ int scoutfs_net_bind(struct super_block *sb,
if (WARN_ON_ONCE(conn->sock))
return -EINVAL;
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
ret = kc_sock_create_kern(sin->ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret)
goto out;
@@ -1642,7 +1654,7 @@ int scoutfs_net_bind(struct super_block *sb,
if (ret)
goto out;
addrlen = sizeof(struct sockaddr_in);
addrlen = sin->ss_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
ret = kernel_bind(sock, (struct sockaddr *)sin, addrlen);
if (ret)
goto out;
@@ -1658,7 +1670,7 @@ int scoutfs_net_bind(struct super_block *sb,
ret = 0;
conn->sock = sock;
*sin = conn->sockname;
sin = (struct sockaddr_storage *)&conn->sockname;
out:
if (ret < 0 && sock)
@@ -1693,7 +1705,7 @@ static bool connect_result(struct scoutfs_net_connection *conn, int *error)
done = true;
*error = 0;
} else if (test_conn_fl(conn, shutting_down) ||
conn->connect_sin.sin_family == 0) {
conn->connect_sin.ss_family == AF_UNSPEC) {
done = true;
*error = -ESHUTDOWN;
}
@@ -1714,7 +1726,7 @@ static bool connect_result(struct scoutfs_net_connection *conn, int *error)
*/
int scoutfs_net_connect(struct super_block *sb,
struct scoutfs_net_connection *conn,
struct sockaddr_in *sin, unsigned long timeout_ms)
struct sockaddr_storage *sin, unsigned long timeout_ms)
{
int ret = 0;

View File

@@ -49,15 +49,15 @@ struct scoutfs_net_connection {
unsigned long flags; /* CONN_FL_* bitmask */
unsigned long reconn_deadline;
struct sockaddr_in connect_sin;
struct sockaddr_storage connect_sin;
unsigned long connect_timeout_ms;
struct socket *sock;
u64 rid;
u64 greeting_id;
struct sockaddr_in sockname;
struct sockaddr_in peername;
struct sockaddr_in last_peername;
struct sockaddr_storage sockname;
struct sockaddr_storage peername;
struct sockaddr_storage last_peername;
struct list_head accepted_head;
struct scoutfs_net_connection *listening_conn;
@@ -99,27 +99,44 @@ enum conn_flags {
CONN_FL_reconn_freeing = (1UL << 6), /* waiting done, setter frees */
};
#define SIN_FMT "%pIS:%u"
#define SIN_ARG(sin) sin, be16_to_cpu((sin)->sin_port)
#define SIN_FMT "%pISpc"
#define SIN_ARG(sin) sin
static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
static inline void scoutfs_addr_to_sin(struct sockaddr_storage *sin,
union scoutfs_inet_addr *addr)
{
BUG_ON(addr->v4.family != cpu_to_le16(SCOUTFS_AF_IPV4));
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
if (addr->v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
memset(sin, 0, sizeof(struct sockaddr_storage));
sin4->sin_family = AF_INET;
sin4->sin_addr.s_addr = cpu_to_be32(le32_to_cpu(addr->v4.addr));
sin4->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
} else if (addr->v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
memset(sin, 0, sizeof(struct sockaddr_storage));
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr.in6_u.u6_addr8, &addr->v6.addr, 16);
sin6->sin6_port = cpu_to_be16(le16_to_cpu(addr->v6.port));
} else
BUG();
}
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_in *sin)
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_storage *sin)
{
BUG_ON(sin->sin_family != AF_INET);
memset(addr, 0, sizeof(union scoutfs_inet_addr));
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
addr->v4.addr = be32_to_le32(sin->sin_addr.s_addr);
addr->v4.port = be16_to_le16(sin->sin_port);
if (sin->ss_family == AF_INET) {
struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
memset(addr, 0, sizeof(union scoutfs_inet_addr));
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
addr->v4.addr = be32_to_le32(sin4->sin_addr.s_addr);
addr->v4.port = be16_to_le16(sin4->sin_port);
} else if (sin->ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
memset(addr, 0, sizeof(union scoutfs_inet_addr));
addr->v6.family = cpu_to_le16(SCOUTFS_AF_IPV6);
memcpy(&addr->v6.addr, &sin6->sin6_addr.in6_u.u6_addr8, 16);
addr->v6.port = be16_to_le16(sin6->sin6_port);
} else
BUG();
}
struct scoutfs_net_connection *
@@ -130,10 +147,10 @@ scoutfs_net_alloc_conn(struct super_block *sb,
u64 scoutfs_net_client_rid(struct scoutfs_net_connection *conn);
int scoutfs_net_connect(struct super_block *sb,
struct scoutfs_net_connection *conn,
struct sockaddr_in *sin, unsigned long timeout_ms);
struct sockaddr_storage *sin, unsigned long timeout_ms);
int scoutfs_net_bind(struct super_block *sb,
struct scoutfs_net_connection *conn,
struct sockaddr_in *sin);
struct sockaddr_storage *sin);
void scoutfs_net_listen(struct super_block *sb,
struct scoutfs_net_connection *conn);
int scoutfs_net_submit_request(struct super_block *sb,

View File

@@ -145,14 +145,26 @@ struct quorum_info {
#define DECLARE_QUORUM_INFO_KOBJ(kobj, name) \
DECLARE_QUORUM_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
static bool quorum_slot_present(struct scoutfs_quorum_config *qconf, int i)
static bool quorum_slot_ipv4(struct scoutfs_quorum_config *qconf, int i)
{
BUG_ON(i < 0 || i > SCOUTFS_QUORUM_MAX_SLOTS);
return qconf->slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4);
}
static void quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_in *sin)
static bool quorum_slot_ipv6(struct scoutfs_quorum_config *qconf, int i)
{
BUG_ON(i < 0 || i > SCOUTFS_QUORUM_MAX_SLOTS);
return qconf->slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6);
}
static bool quorum_slot_present(struct scoutfs_quorum_config *qconf, int i)
{
return quorum_slot_ipv4(qconf, i) || quorum_slot_ipv6(qconf, i);
}
static void quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_storage *sin)
{
BUG_ON(i < 0 || i >= SCOUTFS_QUORUM_MAX_SLOTS);
@@ -179,11 +191,18 @@ static int create_socket(struct super_block *sb)
{
DECLARE_QUORUM_INFO(sb, qinf);
struct socket *sock = NULL;
struct sockaddr_in sin;
struct sockaddr_storage sin;
struct scoutfs_quorum_slot slot = qinf->qconf.slots[qinf->our_quorum_slot_nr];
int addrlen;
int ret;
ret = kc_sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (le16_to_cpu(slot.addr.v4.family) == SCOUTFS_AF_IPV4)
ret = kc_sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
else if (le16_to_cpu(slot.addr.v6.family) == SCOUTFS_AF_IPV6)
ret = kc_sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
else
BUG();
if (ret) {
scoutfs_err(sb, "quorum couldn't create udp socket: %d", ret);
goto out;
@@ -192,9 +211,9 @@ static int create_socket(struct super_block *sb)
/* rather fail and retry than block waiting for free */
sock->sk->sk_allocation = GFP_ATOMIC;
addrlen = (le16_to_cpu(slot.addr.v4.family) == SCOUTFS_AF_IPV4) ?
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
quorum_slot_sin(&qinf->qconf, qinf->our_quorum_slot_nr, &sin);
addrlen = sizeof(sin);
ret = kernel_bind(sock, (struct sockaddr *)&sin, addrlen);
if (ret) {
scoutfs_err(sb, "quorum failed to bind udp socket to "SIN_FMT": %d",
@@ -241,7 +260,7 @@ static int send_msg_members(struct super_block *sb, int type, u64 term, int only
.iov_base = &qmes,
.iov_len = sizeof(qmes),
};
struct sockaddr_in sin;
struct sockaddr_storage sin;
struct msghdr mh = {
.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
.msg_name = &sin,
@@ -542,10 +561,11 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
u64 term)
{
#define NR_OLD 2
struct scoutfs_quorum_block_event old[SCOUTFS_QUORUM_MAX_SLOTS][NR_OLD] = {{{0,}}};
struct scoutfs_quorum_block_event (*old)[NR_OLD];
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_quorum_block blk;
struct sockaddr_in sin;
struct sockaddr_storage sin;
union scoutfs_inet_addr addr;
const __le64 lefsid = cpu_to_le64(sbi->fsid);
const u64 rid = sbi->rid;
bool fence_started = false;
@@ -558,13 +578,20 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
BUILD_BUG_ON(SCOUTFS_QUORUM_BLOCKS < SCOUTFS_QUORUM_MAX_SLOTS);
old = kmalloc(NR_OLD * SCOUTFS_QUORUM_MAX_SLOTS * sizeof(struct scoutfs_quorum_block_event), GFP_KERNEL);
if (!old) {
ret = -ENOMEM;
goto out;
}
memset(old, 0, NR_OLD * SCOUTFS_QUORUM_MAX_SLOTS * sizeof(struct scoutfs_quorum_block_event));
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
if (!quorum_slot_present(qconf, i))
continue;
ret = read_quorum_block(sb, SCOUTFS_QUORUM_BLKNO + i, &blk, false);
if (ret < 0)
goto out;
goto out_free;
/* elected leader still running */
if (le64_to_cpu(blk.events[SCOUTFS_QUORUM_EVENT_ELECT].term) >
@@ -598,14 +625,17 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_c
scoutfs_info(sb, "fencing previous leader "SCSBF" at term %llu in slot %u with address "SIN_FMT,
SCSB_LEFR_ARGS(lefsid, fence_rid),
le64_to_cpu(old[i][j].term), i, SIN_ARG(&sin));
ret = scoutfs_fence_start(sb, le64_to_cpu(fence_rid), sin.sin_addr.s_addr,
scoutfs_sin_to_addr(&addr, &sin);
ret = scoutfs_fence_start(sb, le64_to_cpu(fence_rid), &addr,
SCOUTFS_FENCE_QUORUM_BLOCK_LEADER);
if (ret < 0)
goto out;
goto out_free;
fence_started = true;
}
}
out_free:
kfree(old);
out:
err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
if (ret == 0)
@@ -708,7 +738,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)
struct quorum_info *qinf = container_of(work, struct quorum_info, work);
struct scoutfs_mount_options opts;
struct super_block *sb = qinf->sb;
struct sockaddr_in unused;
struct sockaddr_storage unused;
struct quorum_host_msg msg;
struct quorum_status qst = {0,};
struct hb_recording hbr;
@@ -990,7 +1020,7 @@ out:
* leader with the greatest elected term. If we get it wrong the
* connection will timeout and the client will try again.
*/
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin)
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_storage *sin)
{
struct scoutfs_super_block *super = NULL;
struct scoutfs_quorum_block blk;
@@ -1049,7 +1079,7 @@ u8 scoutfs_quorum_votes_needed(struct super_block *sb)
return qinf->votes_needed;
}
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_in *sin)
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i, struct sockaddr_storage *sin)
{
return quorum_slot_sin(qconf, i, sin);
}
@@ -1195,8 +1225,8 @@ static struct attribute *quorum_attrs[] = {
static inline bool valid_ipv4_unicast(__be32 addr)
{
return !(ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
ipv4_is_zeronet(addr) || ipv4_is_local_multicast(addr));
return !(ipv4_is_multicast(addr) && ipv4_is_lbcast(addr) &&
ipv4_is_zeronet(addr) && ipv4_is_local_multicast(addr));
}
static inline bool valid_ipv4_port(__be16 port)
@@ -1208,8 +1238,12 @@ static int verify_quorum_slots(struct super_block *sb, struct quorum_info *qinf,
struct scoutfs_quorum_config *qconf)
{
char slots[(SCOUTFS_QUORUM_MAX_SLOTS * 3) + 1];
struct sockaddr_in other;
struct sockaddr_in sin;
struct sockaddr_storage other;
struct sockaddr_storage sin;
struct sockaddr_in *sin4;
struct sockaddr_in *other4;
struct sockaddr_in6 *sin6;
struct sockaddr_in6 *other6;
int found = 0;
int ret;
int i;
@@ -1220,35 +1254,78 @@ static int verify_quorum_slots(struct super_block *sb, struct quorum_info *qinf,
if (!quorum_slot_present(qconf, i))
continue;
scoutfs_quorum_slot_sin(qconf, i, &sin);
if (quorum_slot_ipv4(qconf, i)) {
scoutfs_quorum_slot_sin(qconf, i, &sin);
sin4 = (struct sockaddr_in *)&sin;
if (!valid_ipv4_unicast(sin.sin_addr.s_addr)) {
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 unicast address: "SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
if (!valid_ipv4_port(sin.sin_port)) {
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 port number:"SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (!quorum_slot_present(qconf, j))
continue;
scoutfs_quorum_slot_sin(qconf, j, &other);
if (sin.sin_addr.s_addr == other.sin_addr.s_addr &&
sin.sin_port == other.sin_port) {
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
i, j, SIN_ARG(&sin));
if (!valid_ipv4_unicast(sin4->sin_addr.s_addr)) {
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 unicast address: "SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
}
found++;
if (!valid_ipv4_port(sin4->sin_port)) {
scoutfs_err(sb, "quorum slot #%d has invalid ipv4 port number:"SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (!quorum_slot_ipv4(qconf, j))
continue;
scoutfs_quorum_slot_sin(qconf, j, &other);
other4 = (struct sockaddr_in *)&other;
if (sin4->sin_addr.s_addr == other4->sin_addr.s_addr &&
sin4->sin_port == other4->sin_port) {
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
i, j, SIN_ARG(&sin));
return -EINVAL;
}
}
found++;
} else if (quorum_slot_ipv6(qconf, i)) {
quorum_slot_sin(qconf, i, &sin);
sin6 = (struct sockaddr_in6 *)&sin;
if ((sin6->sin6_addr.in6_u.u6_addr32[0] == 0) && (sin6->sin6_addr.in6_u.u6_addr32[1] == 0) &&
(sin6->sin6_addr.in6_u.u6_addr32[2] == 0) && (sin6->sin6_addr.in6_u.u6_addr32[3] == 0)) {
scoutfs_err(sb, "quorum slot #%d has unspecified ipv6 address:"SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
if (sin6->sin6_addr.in6_u.u6_addr8[0] == 0xff) {
scoutfs_err(sb, "quorum slot #%d has multicast ipv6 address:"SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
if (!valid_ipv4_port(sin6->sin6_port)) {
scoutfs_err(sb, "quorum slot #%d has invalid ipv6 port number:"SIN_FMT,
i, SIN_ARG(&sin));
return -EINVAL;
}
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (!quorum_slot_ipv6(qconf, j))
continue;
quorum_slot_sin(qconf, j, &other);
other6 = (struct sockaddr_in6 *)&other;
if ((ipv6_addr_equal(&sin6->sin6_addr, &other6->sin6_addr)) &&
(sin6->sin6_port == other6->sin6_port)) {
scoutfs_err(sb, "quorum slots #%u and #%u have the same address: "SIN_FMT,
i, j, SIN_ARG(&sin));
return -EINVAL;
}
}
found++;
}
}
if (found == 0) {

View File

@@ -1,11 +1,11 @@
#ifndef _SCOUTFS_QUORUM_H_
#define _SCOUTFS_QUORUM_H_
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_storage *sin);
u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i,
struct sockaddr_in *sin);
struct sockaddr_storage *sin);
int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_config *qconf,
u64 term);

View File

@@ -1355,35 +1355,37 @@ DEFINE_EVENT(scoutfs_lock_class, scoutfs_lock_shrink,
);
DECLARE_EVENT_CLASS(scoutfs_net_class,
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
TP_ARGS(sb, name, peer, nh),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
si4_trace_define(name)
si4_trace_define(peer)
__field_struct(struct sockaddr_storage, name)
__field_struct(struct sockaddr_storage, peer)
snh_trace_define(nh)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
si4_trace_assign(name, name);
si4_trace_assign(peer, peer);
memcpy(&__entry->name, name, sizeof(struct sockaddr_storage));
memcpy(&__entry->peer, peer, sizeof(struct sockaddr_storage));
snh_trace_assign(nh, nh);
),
TP_printk(SCSBF" name "SI4_FMT" peer "SI4_FMT" nh "SNH_FMT,
SCSB_TRACE_ARGS, si4_trace_args(name), si4_trace_args(peer),
TP_printk(SCSBF" name "SIN_FMT" peer "SIN_FMT" nh "SNH_FMT,
SCSB_TRACE_ARGS,
&__entry->name,
&__entry->peer,
snh_trace_args(nh))
);
DEFINE_EVENT(scoutfs_net_class, scoutfs_net_send_message,
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
TP_ARGS(sb, name, peer, nh)
);
DEFINE_EVENT(scoutfs_net_class, scoutfs_net_recv_message,
TP_PROTO(struct super_block *sb, struct sockaddr_in *name,
struct sockaddr_in *peer, struct scoutfs_net_header *nh),
TP_PROTO(struct super_block *sb, struct sockaddr_storage *name,
struct sockaddr_storage *peer, struct scoutfs_net_header *nh),
TP_ARGS(sb, name, peer, nh)
);
@@ -1416,8 +1418,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
__field(void *, sock)
__field(__u64, c_rid)
__field(__u64, greeting_id)
si4_trace_define(sockname)
si4_trace_define(peername)
__field_struct(struct sockaddr_storage, sockname)
__field_struct(struct sockaddr_storage, peername)
__field(unsigned char, e_accepted_head)
__field(void *, listening_conn)
__field(unsigned char, e_accepted_list)
@@ -1435,8 +1437,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
__entry->sock = conn->sock;
__entry->c_rid = conn->rid;
__entry->greeting_id = conn->greeting_id;
si4_trace_assign(sockname, &conn->sockname);
si4_trace_assign(peername, &conn->peername);
memcpy(&__entry->sockname, &conn->sockname, sizeof(struct sockaddr_storage));
memcpy(&__entry->peername, &conn->peername, sizeof(struct sockaddr_storage));
__entry->e_accepted_head = !!list_empty(&conn->accepted_head);
__entry->listening_conn = conn->listening_conn;
__entry->e_accepted_list = !!list_empty(&conn->accepted_list);
@@ -1446,7 +1448,7 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
__entry->e_resend_queue = !!list_empty(&conn->resend_queue);
__entry->recv_seq = atomic64_read(&conn->recv_seq);
),
TP_printk(SCSBF" flags %s rc_dl %lu cto %lu sk %p rid %llu grid %llu sn "SI4_FMT" pn "SI4_FMT" eah %u lc %p eal %u nss %llu nsi %llu esq %u erq %u rs %llu",
TP_printk(SCSBF" flags %s rc_dl %lu cto %lu sk %p rid %llu grid %llu sn "SIN_FMT" pn "SIN_FMT" eah %u lc %p eal %u nss %llu nsi %llu esq %u erq %u rs %llu",
SCSB_TRACE_ARGS,
print_conn_flags(__entry->flags),
__entry->reconn_deadline,
@@ -1454,8 +1456,8 @@ DECLARE_EVENT_CLASS(scoutfs_net_conn_class,
__entry->sock,
__entry->c_rid,
__entry->greeting_id,
si4_trace_args(sockname),
si4_trace_args(peername),
&__entry->sockname,
&__entry->peername,
__entry->e_accepted_head,
__entry->listening_conn,
__entry->e_accepted_list,

View File

@@ -256,14 +256,6 @@ static void server_down(struct server_info *server)
cmpxchg(&server->status, was, SERVER_DOWN);
}
static void init_mounted_client_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
.sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
.skmc_rid = cpu_to_le64(rid),
};
}
/*
* The per-holder allocation block use budget balances batching
* efficiency and concurrency. The larger this gets, the fewer
@@ -971,28 +963,6 @@ static int find_log_trees_item(struct super_block *sb,
return ret;
}
/*
* Return true if the given rid has a mounted_clients entry.
*/
static bool rid_is_mounted(struct super_block *sb, u64 rid)
{
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
int ret;
init_mounted_client_key(&key, rid);
mutex_lock(&server->mounted_clients_mutex);
ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key, &iref);
if (ret == 0)
scoutfs_btree_put_iref(&iref);
mutex_unlock(&server->mounted_clients_mutex);
return ret == 0;
}
/*
* Find the log_trees item with the greatest nr for each rid. Fills the
* caller's log_trees and sets the key before the returned log_trees for
@@ -1251,60 +1221,6 @@ static int do_finalize_ours(struct super_block *sb,
* happens to arrive at just the right time. That's fine, merging will
* ignore and tear down the empty input.
*/
static int reclaim_open_log_tree(struct super_block *sb, u64 rid);
/*
* Reclaim log trees for rids that have no mounted_clients entry.
* They block merges by appearing active. reclaim_open_log_tree
* may need multiple commits to drain allocators (-EINPROGRESS).
*
* The caller holds logs_mutex and a commit, both are dropped and
* re-acquired around each reclaim call. Returns >0 if any orphans
* were reclaimed so the caller can re-check state that may have
* changed while the lock was dropped.
*/
static int reclaim_orphan_log_trees(struct super_block *sb, u64 rid,
struct commit_hold *hold)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
struct scoutfs_log_trees lt;
struct scoutfs_key key;
bool found = false;
u64 orphan_rid;
int ret;
int err;
scoutfs_key_init_log_trees(&key, U64_MAX, U64_MAX);
while ((ret = for_each_rid_last_lt(sb, &super->logs_root, &key, &lt)) > 0) {
if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED) ||
le64_to_cpu(lt.rid) == rid ||
rid_is_mounted(sb, le64_to_cpu(lt.rid)))
continue;
orphan_rid = le64_to_cpu(lt.rid);
scoutfs_err(sb, "reclaiming orphan log trees for rid %016llx nr %llu",
orphan_rid, le64_to_cpu(lt.nr));
found = true;
do {
mutex_unlock(&server->logs_mutex);
err = reclaim_open_log_tree(sb, orphan_rid);
ret = server_apply_commit(sb, hold,
err == -EINPROGRESS ? 0 : err);
server_hold_commit(sb, hold);
mutex_lock(&server->logs_mutex);
} while (err == -EINPROGRESS && ret == 0);
if (ret < 0)
break;
}
return ret < 0 ? ret : found;
}
#define FINALIZE_POLL_MIN_DELAY_MS 5U
#define FINALIZE_POLL_MAX_DELAY_MS 100U
#define FINALIZE_POLL_DELAY_GROWTH_PCT 150U
@@ -1345,16 +1261,6 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
break;
}
ret = reclaim_orphan_log_trees(sb, rid, hold);
if (ret < 0) {
err_str = "reclaiming orphan log trees";
break;
}
if (ret > 0) {
/* lock was dropped, re-check merge status */
continue;
}
/* look for finalized and other active log btrees */
saw_finalized = false;
others_active = false;
@@ -2023,7 +1929,7 @@ static int reclaim_open_log_tree(struct super_block *sb, u64 rid)
mutex_unlock(&server->alloc_mutex);
/* only finalize, allowing merging, once the allocators are fully freed */
if (ret == 0 && !scoutfs_trigger(sb, RECLAIM_SKIP_FINALIZE)) {
if (ret == 0) {
/* the transaction is no longer open */
lt.commit_trans_seq = lt.get_trans_seq;
@@ -2075,8 +1981,7 @@ static int get_stable_trans_seq(struct super_block *sb, u64 *last_seq_ret)
scoutfs_key_init_log_trees(&key, U64_MAX, U64_MAX);
while ((ret = for_each_rid_last_lt(sb, &super->logs_root, &key, &lt)) > 0) {
if ((le64_to_cpu(lt.get_trans_seq) > le64_to_cpu(lt.commit_trans_seq)) &&
le64_to_cpu(lt.get_trans_seq) <= last_seq &&
rid_is_mounted(sb, le64_to_cpu(lt.rid))) {
le64_to_cpu(lt.get_trans_seq) <= last_seq) {
last_seq = le64_to_cpu(lt.get_trans_seq) - 1;
}
}
@@ -3628,6 +3533,14 @@ out:
return scoutfs_net_response(sb, conn, cmd, id, ret, &nst, sizeof(nst));
}
static void init_mounted_client_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
.sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
.skmc_rid = cpu_to_le64(rid),
};
}
static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
{
return (iref->val_len != sizeof(struct scoutfs_mounted_client_btree_val));
@@ -3640,7 +3553,7 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
* it's acceptable to see -EEXIST.
*/
static int insert_mounted_client(struct super_block *sb, u64 rid, u64 gr_flags,
struct sockaddr_in *sin)
struct sockaddr_storage *sin)
{
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
@@ -4393,7 +4306,7 @@ static void fence_pending_recov_worker(struct work_struct *work)
break;
}
ret = scoutfs_fence_start(sb, rid, le32_to_be32(addr.v4.addr),
ret = scoutfs_fence_start(sb, rid, &addr,
SCOUTFS_FENCE_CLIENT_RECOVERY);
if (ret < 0) {
scoutfs_err(sb, "fence returned err %d, shutting down server", ret);
@@ -4544,7 +4457,7 @@ static void scoutfs_server_worker(struct work_struct *work)
struct scoutfs_net_connection *conn = NULL;
struct scoutfs_mount_options opts;
DECLARE_WAIT_QUEUE_HEAD(waitq);
struct sockaddr_in sin;
struct sockaddr_storage sin;
bool alloc_init = false;
u64 max_seq;
int ret;
@@ -4553,7 +4466,7 @@ static void scoutfs_server_worker(struct work_struct *work)
scoutfs_options_read(sb, &opts);
scoutfs_quorum_slot_sin(&server->qconf, opts.quorum_slot_nr, &sin);
scoutfs_info(sb, "server starting at "SIN_FMT, SIN_ARG(&sin));
scoutfs_info(sb, "server starting at "SIN_FMT, &sin);
scoutfs_block_writer_init(sb, &server->wri);
server->finalize_sent_seq = 0;

View File

@@ -1,27 +1,6 @@
#ifndef _SCOUTFS_SERVER_H_
#define _SCOUTFS_SERVER_H_
#define SI4_FMT "%u.%u.%u.%u:%u"
#define si4_trace_define(name) \
__field(__u32, name##_addr) \
__field(__u16, name##_port)
#define si4_trace_assign(name, sin) \
do { \
__typeof__(sin) _sin = (sin); \
\
__entry->name##_addr = be32_to_cpu(_sin->sin_addr.s_addr); \
__entry->name##_port = be16_to_cpu(_sin->sin_port); \
} while(0)
#define si4_trace_args(name) \
(__entry->name##_addr >> 24), \
(__entry->name##_addr >> 16) & 255, \
(__entry->name##_addr >> 8) & 255, \
__entry->name##_addr & 255, \
__entry->name##_port
#define SNH_FMT \
"seq %llu recv_seq %llu id %llu data_len %u cmd %u flags 0x%x error %u"
#define SNH_ARG(nh) \

View File

@@ -30,11 +30,6 @@ void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg)
memset(merg, 0, sizeof(struct scoutfs_totl_merging));
}
/*
* bin the incoming merge inputs so that we can resolve delta items
* properly. Finalized logs that are merge inputs are kept separately
* from those that are not.
*/
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
u64 seq, u8 flags, void *val, int val_len, int fic)
{
@@ -44,10 +39,10 @@ void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
merg->fs_seq = seq;
merg->fs_total = le64_to_cpu(tval->total);
merg->fs_count = le64_to_cpu(tval->count);
} else if (fic & FIC_MERGE_INPUT) {
merg->inp_seq = seq;
merg->inp_total += le64_to_cpu(tval->total);
merg->inp_count += le64_to_cpu(tval->count);
} else if (fic & FIC_FINALIZED) {
merg->fin_seq = seq;
merg->fin_total += le64_to_cpu(tval->total);
merg->fin_count += le64_to_cpu(tval->count);
} else {
merg->log_seq = seq;
merg->log_total += le64_to_cpu(tval->total);
@@ -58,18 +53,15 @@ void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
/*
* .totl. item merging has to be careful because the log btree merging
* code can write partial results to the fs_root. This means that a
* reader can see both cases where merge input deltas should be applied
* to the old fs items and where they have already been applied to the
* partially merged fs items.
*
* Only finalized log trees that are inputs to the current merge cycle
* are tracked in the inp_ bucket. Finalized trees that aren't merge
* inputs and active log trees are always applied unconditionally since
* they cannot be in fs_root.
* reader can see both cases where new finalized logs should be applied
* to the old fs items and where old finalized logs have already been
* applied to the partially merged fs items. Currently active logged
* items are always applied on top of all cases.
*
* These cases are differentiated with a combination of sequence numbers
* in items and the count of contributing xattrs. This lets us
* recognize all cases, including when merge inputs were merged and
* in items, the count of contributing xattrs, and a flag
* differentiating finalized and active logged items. This lets us
* recognize all cases, including when finalized logs were merged and
* deleted the fs item.
*/
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count)
@@ -83,14 +75,14 @@ void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total,
*count = merg->fs_count;
}
/* apply merge input deltas if they're newer or creating */
if (((merg->fs_seq != 0) && (merg->inp_seq > merg->fs_seq)) ||
((merg->fs_seq == 0) && (merg->inp_count > 0))) {
*total += merg->inp_total;
*count += merg->inp_count;
/* apply finalized logs if they're newer or creating */
if (((merg->fs_seq != 0) && (merg->fin_seq > merg->fs_seq)) ||
((merg->fs_seq == 0) && (merg->fin_count > 0))) {
*total += merg->fin_total;
*count += merg->fin_count;
}
/* always apply non-input finalized and active logs */
/* always apply active logs which must be newer than fs and finalized */
if (merg->log_seq > 0) {
*total += merg->log_total;
*count += merg->log_count;

View File

@@ -7,9 +7,9 @@ struct scoutfs_totl_merging {
u64 fs_seq;
u64 fs_total;
u64 fs_count;
u64 inp_seq;
u64 inp_total;
s64 inp_count;
u64 fin_seq;
u64 fin_total;
s64 fin_count;
u64 log_seq;
u64 log_total;
s64 log_count;

View File

@@ -45,8 +45,6 @@ static char *names[] = {
[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
[SCOUTFS_TRIGGER_RECLAIM_SKIP_FINALIZE] = "reclaim_skip_finalize",
[SCOUTFS_TRIGGER_LOG_MERGE_FORCE_PARTIAL] = "log_merge_force_partial",
};
bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)

View File

@@ -8,8 +8,6 @@ enum scoutfs_trigger {
SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
SCOUTFS_TRIGGER_RECLAIM_SKIP_FINALIZE,
SCOUTFS_TRIGGER_LOG_MERGE_FORCE_PARTIAL,
SCOUTFS_TRIGGER_NR,
};

View File

@@ -95,7 +95,6 @@ struct wkic_info {
/* block reading slow path */
struct mutex roots_mutex;
struct scoutfs_net_roots roots;
u64 merge_input_seq;
u64 roots_read_seq;
ktime_t roots_expire;
@@ -806,79 +805,29 @@ static void free_page_list(struct super_block *sb, struct list_head *list)
* read_seq number so that we can compare the age of the items in cached
* pages. Only one request to refresh the roots is in progress at a
* time. This is the slow path that's only used when the cache isn't
* populated and the roots aren't cached.
*
* We read roots directly from the on-disk superblock rather than
* requesting them from the server so that we can also read the
* log_merge btree from the same superblock. The merge status item
* seq tells us which finalized log trees are inputs to the current
* merge, which is needed to correctly resolve totl delta items.
* populated and the roots aren't cached. The root request is fast
* enough, especially compared to the resulting item reading IO, that we
* don't mind hiding it behind a trivial mutex.
*/
static int refresh_roots(struct super_block *sb, struct wkic_info *winf)
{
struct scoutfs_super_block *super;
struct scoutfs_log_merge_status *stat;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
int ret;
super = kmalloc(sizeof(*super), GFP_NOFS);
if (!super)
return -ENOMEM;
ret = scoutfs_read_super(sb, super);
if (ret < 0)
goto out;
winf->roots = (struct scoutfs_net_roots){
.fs_root = super->fs_root,
.logs_root = super->logs_root,
.srch_root = super->srch_root,
};
winf->merge_input_seq = 0;
if (super->log_merge.ref.blkno) {
scoutfs_key_set_zeros(&key);
key.sk_zone = SCOUTFS_LOG_MERGE_STATUS_ZONE;
ret = scoutfs_btree_lookup(sb, &super->log_merge, &key, &iref);
if (ret == 0) {
if (iref.val_len == sizeof(*stat)) {
stat = iref.val;
winf->merge_input_seq = le64_to_cpu(stat->seq);
} else {
ret = -EUCLEAN;
}
scoutfs_btree_put_iref(&iref);
} else if (ret == -ENOENT) {
ret = 0;
}
if (ret < 0)
goto out;
}
winf->roots_read_seq++;
winf->roots_expire = ktime_add_ms(ktime_get_raw(), WKIC_CACHE_LIFETIME_MS);
out:
kfree(super);
return ret;
}
static int get_roots(struct super_block *sb, struct wkic_info *winf,
struct scoutfs_net_roots *roots_ret, u64 *merge_input_seq,
u64 *read_seq, bool force_new)
struct scoutfs_net_roots *roots_ret, u64 *read_seq, bool force_new)
{
struct scoutfs_net_roots roots;
int ret;
mutex_lock(&winf->roots_mutex);
if (force_new || ktime_before(winf->roots_expire, ktime_get_raw())) {
ret = refresh_roots(sb, winf);
ret = scoutfs_client_get_roots(sb, &roots);
if (ret < 0)
goto out;
winf->roots = roots;
winf->roots_read_seq++;
winf->roots_expire = ktime_add_ms(ktime_get_raw(), WKIC_CACHE_LIFETIME_MS);
}
*roots_ret = winf->roots;
*merge_input_seq = winf->merge_input_seq;
*read_seq = winf->roots_read_seq;
ret = 0;
out:
@@ -921,30 +870,24 @@ static int insert_read_pages(struct super_block *sb, struct wkic_info *winf,
struct scoutfs_key end;
struct wkic_page *wpage;
LIST_HEAD(pages);
u64 merge_input_seq;
u64 read_seq = 0;
u64 read_seq;
int ret;
ret = 0;
retry_stale:
ret = get_roots(sb, winf, &roots, &merge_input_seq, &read_seq, ret == -ESTALE);
ret = get_roots(sb, winf, &roots, &read_seq, ret == -ESTALE);
if (ret < 0)
goto check_stale;
goto out;
start = *range_start;
end = *range_end;
ret = scoutfs_forest_read_items_roots(sb, &roots, merge_input_seq, key, range_start,
&start, &end, read_items_cb, &root);
ret = scoutfs_forest_read_items_roots(sb, &roots, key, range_start, &start, &end,
read_items_cb, &root);
trace_scoutfs_wkic_read_items(sb, key, &start, &end);
check_stale:
ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.fs_root.ref, &roots.logs_root.ref);
if (ret < 0) {
if (ret == -ESTALE) {
/* not safe to retry due to delta items, must restart clean */
free_item_tree(&root);
root = RB_ROOT;
if (ret == -ESTALE)
goto retry_stale;
}
goto out;
}

View File

@@ -1265,7 +1265,6 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
ret = parse_indx_key(&tag_key, xat->name, xat->name_len, ino);
if (ret < 0)
goto out;
scoutfs_xattr_set_indx_key_xid(&tag_key, le64_to_cpu(key.skx_id));
}
if ((tgs.totl || tgs.indx) && locked_zone != tag_key.sk_zone) {

View File

@@ -20,6 +20,9 @@ t_filter_fs()
# [ 2687.691366] BUG: KASAN: stack-out-of-bounds in get_reg+0x1bc/0x230
# ...
# [ 2687.706220] ==================================================================
# [ 2687.707284] Disabling lock debugging due to kernel taint
#
# That final lock debugging message may not be included.
#
ignore_harmless_unwind_kasan_stack_oob()
{
@@ -43,6 +46,10 @@ awk '
saved=""
}
( in_soob == 2 && $0 ~ /==================================================================/ ) {
in_soob = 3
soob_nr = NR
}
( in_soob == 3 && NR > soob_nr && $0 !~ /Disabling lock debugging/ ) {
in_soob = 0
}
( !in_soob ) { print $0 }
@@ -54,58 +61,6 @@ awk '
'
}
#
# in el97+, XFS can generate a spurious lockdep circular dependency
# warning about reclaim. Fixed upstream in e.g. v5.7-rc4-129-g6dcde60efd94
#
ignore_harmless_xfs_lockdep_warning()
{
awk '
BEGIN {
in_block = 0
block_nr = 0
buf = ""
}
( !in_block && $0 ~ /======================================================/ ) {
in_block = 1
block_nr = NR
buf = $0 "\n"
next
}
( in_block == 1 && NR == (block_nr + 1) ) {
if (match($0, /WARNING: possible circular locking dependency detected/) != 0) {
in_block = 2
buf = buf $0 "\n"
} else {
in_block = 0
printf "%s", buf
print $0
buf = ""
}
next
}
( in_block == 2 ) {
buf = buf $0 "\n"
if ($0 ~ /<\/TASK>/) {
if (buf ~ /xfs_nondir_ilock_class/ && buf ~ /fs_reclaim/) {
# known xfs lockdep false positive, discard
} else {
printf "%s", buf
}
in_block = 0
buf = ""
}
next
}
{ print $0 }
END {
if (buf) {
printf "%s", buf
}
}
'
}
#
# Filter out expected messages. Putting messages here implies that
# tests aren't relying on messages to discover failures.. they're
@@ -168,9 +123,6 @@ t_filter_dmesg()
re="$re|hrtimer: interrupt took .*"
re="$re|clocksource: Long readout interval"
# orphan log trees reclaim is handled, not an error
re="$re|scoutfs .* reclaiming orphan log trees"
# fencing tests force unmounts and trigger timeouts
re="$re|scoutfs .* forcing unmount"
re="$re|scoutfs .* reconnect timed out"
@@ -221,10 +173,6 @@ t_filter_dmesg()
# creating block devices may trigger this
re="$re|block device autoloading is deprecated and will be removed."
# lockdep or kasan warnings can cause this
re="$re|Disabling lock debugging due to kernel taint"
egrep -v "($re)" | \
ignore_harmless_unwind_kasan_stack_oob | \
ignore_harmless_xfs_lockdep_warning
ignore_harmless_unwind_kasan_stack_oob
}

View File

@@ -1,54 +0,0 @@
== testing invalid read-xattr-index arguments
bad index position entry argument 'bad', it must be in the form "a.b.ino" where each value can be prefixed by '0' for octal or '0x' for hex
scoutfs: read-xattr-index failed: Invalid argument (22)
bad index position entry argument '1.2', it must be in the form "a.b.ino" where each value can be prefixed by '0' for octal or '0x' for hex
scoutfs: read-xattr-index failed: Invalid argument (22)
initial major index position '256' must be between 0 and 255, inclusive.
scoutfs: read-xattr-index failed: Invalid argument (22)
first index position 1.2.3 must be less than last index position 0.0.0
scoutfs: read-xattr-index failed: Invalid argument (22)
first index position 1.2.0 must be less than last index position 1.1.2
scoutfs: read-xattr-index failed: Invalid argument (22)
first index position 2.2.2 must be less than last index position 2.2.1
scoutfs: read-xattr-index failed: Invalid argument (22)
== testing invalid names
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/invalid: Numerical result out of range
== testing boundary values
0.0 found
255.max found
== indx xattr must have no value
setfattr: /mnt/test/test/basic-xattr-indx/noval: Invalid argument
setfattr: /mnt/test/test/basic-xattr-indx/noval: Invalid argument
== set indx xattr and verify index entry
found
== setting same indx xattr again is a no-op
found
== removing non-existent indx xattr succeeds
setfattr: /mnt/test/test/basic-xattr-indx/file: No such attribute
still found
== explicit xattr removal cleans up index entry
== file deletion cleans up index entry
found before delete
== multiple indx xattrs on one file cleaned up by deletion
entries before delete: 2
entries after delete: 0
== partial removal leaves other entries
300 found
== multiple files at same index position
files at same position: 2
surviving file found
== cross-mount visibility
found on mount 1
== duplicate position deduplication
entries for same position: 1

View File

View File

@@ -1,3 +0,0 @@
== create orphan log_trees entry via trigger
== verify orphan is reclaimed and merge completes
== verify orphan reclaim was logged

View File

@@ -1,460 +0,0 @@
== missing options should fail ==
punch-offline: must provide offset
Try `punch-offline --help' or `punch-offline --usage' for more information.
punch-offline: must provide length
Try `punch-offline --help' or `punch-offline --usage' for more information.
punch-offline: must provide data_version
Try `punch-offline --help' or `punch-offline --usage' for more information.
== can't hole punch dir or special ==
failed to open '/mnt/test.0/test/punch-offline/dir': Is a directory (21)
scoutfs: punch-offline failed: Is a directory (21)
== punching an empty file does nothing ==
== punch outside of i_size does nothing ==
== can't hole punch online extent ==
0: offset: 0 length: 4096 flags: ..L
extents: 1
punch_offline ioctl failed: Invalid argument (22)
scoutfs: punch-offline failed: Invalid argument (22)
0: offset: 0 length: 4096 flags: ..L
extents: 1
== can't hole punch unwritten extent ==
0: offset: 0 length: 12288 flags: .UL
extents: 1
punch_offline ioctl failed: Invalid argument (22)
scoutfs: punch-offline failed: Invalid argument (22)
0: offset: 0 length: 12288 flags: .UL
extents: 1
== hole punch offline extent ==
0: offset: 0 length: 12288 flags: O.L
extents: 1
0: offset: 0 length: 4096 flags: O..
1: offset: 8192 length: 4096 flags: O.L
extents: 2
== can't hole punch non-aligned bsz offset or len ==
0: offset: 0 length: 12288 flags: O.L
extents: 1
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
punch_offline ioctl failed: Value too large for defined data type (75)
scoutfs: punch-offline failed: Value too large for defined data type (75)
0: offset: 0 length: 12288 flags: O.L
extents: 1
== can't hole punch mismatched data_version ==
0: offset: 0 length: 12288 flags: O.L
extents: 1
punch_offline ioctl failed: Stale file handle (116)
scoutfs: punch-offline failed: Stale file handle (116)
punch_offline ioctl failed: Stale file handle (116)
scoutfs: punch-offline failed: Stale file handle (116)
punch_offline ioctl failed: Stale file handle (116)
scoutfs: punch-offline failed: Stale file handle (116)
0: offset: 0 length: 12288 flags: O.L
extents: 1
== Punch hole crossing multiple extents ==
0: offset: 0 length: 7 flags: O.L
extents: 1
0: offset: 0 length: 1 flags: O..
1: offset: 2 length: 1 flags: O..
2: offset: 4 length: 1 flags: O..
3: offset: 6 length: 1 flags: O.L
extents: 4
0: offset: 0 length: 1 flags: O..
1: offset: 6 length: 1 flags: O.L
extents: 2
== punch hole starting at a hole ==
0: offset: 0 length: 7 flags: O.L
extents: 1
0: offset: 0 length: 1 flags: O..
1: offset: 2 length: 1 flags: O..
2: offset: 4 length: 1 flags: O..
3: offset: 6 length: 1 flags: O.L
extents: 4
0: offset: 0 length: 1 flags: O..
1: offset: 6 length: 1 flags: O.L
extents: 2
== large punch ==
0: offset: 0 length: 1572864 flags: O.L
extents: 1
0: offset: 0 length: 134123 flags: O..
1: offset: 202466 length: 264807 flags: O..
2: offset: 535616 length: 199007 flags: O..
3: offset: 802966 length: 769898 flags: O.L
extents: 4
== overlapping punches with lots of extents ==
0: offset: 0 length: 4194304 flags: O.L
extents: 1
extents: 512
extents: 505
extents: 378
extents: 252
0: offset: 0 length: 4096 flags: O..
1: offset: 8192 length: 4096 flags: O..
2: offset: 32768 length: 4096 flags: O..
3: offset: 40960 length: 4096 flags: O..
4: offset: 65536 length: 4096 flags: O..
5: offset: 73728 length: 4096 flags: O..
6: offset: 98304 length: 4096 flags: O..
7: offset: 106496 length: 4096 flags: O..
8: offset: 196608 length: 4096 flags: O..
9: offset: 204800 length: 4096 flags: O..
10: offset: 229376 length: 4096 flags: O..
11: offset: 237568 length: 4096 flags: O..
12: offset: 262144 length: 4096 flags: O..
13: offset: 270336 length: 4096 flags: O..
14: offset: 294912 length: 4096 flags: O..
15: offset: 303104 length: 4096 flags: O..
16: offset: 327680 length: 4096 flags: O..
17: offset: 335872 length: 4096 flags: O..
18: offset: 360448 length: 4096 flags: O..
19: offset: 368640 length: 4096 flags: O..
20: offset: 393216 length: 4096 flags: O..
21: offset: 401408 length: 4096 flags: O..
22: offset: 425984 length: 4096 flags: O..
23: offset: 434176 length: 4096 flags: O..
24: offset: 458752 length: 4096 flags: O..
25: offset: 466944 length: 4096 flags: O..
26: offset: 491520 length: 4096 flags: O..
27: offset: 499712 length: 4096 flags: O..
28: offset: 720896 length: 4096 flags: O..
29: offset: 729088 length: 4096 flags: O..
30: offset: 753664 length: 4096 flags: O..
31: offset: 761856 length: 4096 flags: O..
32: offset: 786432 length: 4096 flags: O..
33: offset: 794624 length: 4096 flags: O..
34: offset: 819200 length: 4096 flags: O..
35: offset: 827392 length: 4096 flags: O..
36: offset: 851968 length: 4096 flags: O..
37: offset: 860160 length: 4096 flags: O..
38: offset: 884736 length: 4096 flags: O..
39: offset: 892928 length: 4096 flags: O..
40: offset: 917504 length: 4096 flags: O..
41: offset: 925696 length: 4096 flags: O..
42: offset: 950272 length: 4096 flags: O..
43: offset: 958464 length: 4096 flags: O..
44: offset: 983040 length: 4096 flags: O..
45: offset: 991232 length: 4096 flags: O..
46: offset: 1015808 length: 4096 flags: O..
47: offset: 1024000 length: 4096 flags: O..
48: offset: 1048576 length: 4096 flags: O..
49: offset: 1056768 length: 4096 flags: O..
50: offset: 1081344 length: 4096 flags: O..
51: offset: 1089536 length: 4096 flags: O..
52: offset: 1114112 length: 4096 flags: O..
53: offset: 1122304 length: 4096 flags: O..
54: offset: 1146880 length: 4096 flags: O..
55: offset: 1155072 length: 4096 flags: O..
56: offset: 1179648 length: 4096 flags: O..
57: offset: 1187840 length: 4096 flags: O..
58: offset: 1212416 length: 4096 flags: O..
59: offset: 1220608 length: 4096 flags: O..
60: offset: 1245184 length: 4096 flags: O..
61: offset: 1253376 length: 4096 flags: O..
62: offset: 1277952 length: 4096 flags: O..
63: offset: 1286144 length: 4096 flags: O..
64: offset: 1310720 length: 4096 flags: O..
65: offset: 1318912 length: 4096 flags: O..
66: offset: 1343488 length: 4096 flags: O..
67: offset: 1351680 length: 4096 flags: O..
68: offset: 1376256 length: 4096 flags: O..
69: offset: 1384448 length: 4096 flags: O..
70: offset: 1409024 length: 4096 flags: O..
71: offset: 1417216 length: 4096 flags: O..
72: offset: 1441792 length: 4096 flags: O..
73: offset: 1449984 length: 4096 flags: O..
74: offset: 1474560 length: 4096 flags: O..
75: offset: 1482752 length: 4096 flags: O..
76: offset: 1507328 length: 4096 flags: O..
77: offset: 1515520 length: 4096 flags: O..
78: offset: 1540096 length: 4096 flags: O..
79: offset: 1548288 length: 4096 flags: O..
80: offset: 1572864 length: 4096 flags: O..
81: offset: 1581056 length: 4096 flags: O..
82: offset: 1605632 length: 4096 flags: O..
83: offset: 1613824 length: 4096 flags: O..
84: offset: 1638400 length: 4096 flags: O..
85: offset: 1646592 length: 4096 flags: O..
86: offset: 1671168 length: 4096 flags: O..
87: offset: 1679360 length: 4096 flags: O..
88: offset: 1703936 length: 4096 flags: O..
89: offset: 1712128 length: 4096 flags: O..
90: offset: 1736704 length: 4096 flags: O..
91: offset: 1744896 length: 4096 flags: O..
92: offset: 1769472 length: 4096 flags: O..
93: offset: 1777664 length: 4096 flags: O..
94: offset: 1802240 length: 4096 flags: O..
95: offset: 1810432 length: 4096 flags: O..
96: offset: 1835008 length: 4096 flags: O..
97: offset: 1843200 length: 4096 flags: O..
98: offset: 1867776 length: 4096 flags: O..
99: offset: 1875968 length: 4096 flags: O..
100: offset: 1900544 length: 4096 flags: O..
101: offset: 1908736 length: 4096 flags: O..
102: offset: 1933312 length: 4096 flags: O..
103: offset: 1941504 length: 4096 flags: O..
104: offset: 1966080 length: 4096 flags: O..
105: offset: 1974272 length: 4096 flags: O..
106: offset: 1998848 length: 4096 flags: O..
107: offset: 2007040 length: 4096 flags: O..
108: offset: 2031616 length: 4096 flags: O..
109: offset: 2039808 length: 4096 flags: O..
110: offset: 2064384 length: 4096 flags: O..
111: offset: 2072576 length: 4096 flags: O..
112: offset: 2097152 length: 4096 flags: O..
113: offset: 2105344 length: 4096 flags: O..
114: offset: 2129920 length: 4096 flags: O..
115: offset: 2138112 length: 4096 flags: O..
116: offset: 2162688 length: 4096 flags: O..
117: offset: 2170880 length: 4096 flags: O..
118: offset: 2195456 length: 4096 flags: O..
119: offset: 2203648 length: 4096 flags: O..
120: offset: 2228224 length: 4096 flags: O..
121: offset: 2236416 length: 4096 flags: O..
122: offset: 2260992 length: 4096 flags: O..
123: offset: 2269184 length: 4096 flags: O..
124: offset: 2293760 length: 4096 flags: O..
125: offset: 2301952 length: 4096 flags: O..
126: offset: 2326528 length: 4096 flags: O..
127: offset: 2334720 length: 4096 flags: O..
128: offset: 2359296 length: 4096 flags: O..
129: offset: 2367488 length: 4096 flags: O..
130: offset: 2392064 length: 4096 flags: O..
131: offset: 2400256 length: 4096 flags: O..
132: offset: 2424832 length: 4096 flags: O..
133: offset: 2433024 length: 4096 flags: O..
134: offset: 2457600 length: 4096 flags: O..
135: offset: 2465792 length: 4096 flags: O..
136: offset: 2490368 length: 4096 flags: O..
137: offset: 2498560 length: 4096 flags: O..
138: offset: 2523136 length: 4096 flags: O..
139: offset: 2531328 length: 4096 flags: O..
140: offset: 2555904 length: 4096 flags: O..
141: offset: 2564096 length: 4096 flags: O..
142: offset: 2588672 length: 4096 flags: O..
143: offset: 2596864 length: 4096 flags: O..
144: offset: 2621440 length: 4096 flags: O..
145: offset: 2629632 length: 4096 flags: O..
146: offset: 2654208 length: 4096 flags: O..
147: offset: 2662400 length: 4096 flags: O..
148: offset: 2686976 length: 4096 flags: O..
149: offset: 2695168 length: 4096 flags: O..
150: offset: 2719744 length: 4096 flags: O..
151: offset: 2727936 length: 4096 flags: O..
152: offset: 2752512 length: 4096 flags: O..
153: offset: 2760704 length: 4096 flags: O..
154: offset: 2785280 length: 4096 flags: O..
155: offset: 2793472 length: 4096 flags: O..
156: offset: 2818048 length: 4096 flags: O..
157: offset: 2826240 length: 4096 flags: O..
158: offset: 2850816 length: 4096 flags: O..
159: offset: 2859008 length: 4096 flags: O..
160: offset: 2883584 length: 4096 flags: O..
161: offset: 2891776 length: 4096 flags: O..
162: offset: 2916352 length: 4096 flags: O..
163: offset: 2924544 length: 4096 flags: O..
164: offset: 2949120 length: 4096 flags: O..
165: offset: 2957312 length: 4096 flags: O..
166: offset: 2981888 length: 4096 flags: O..
167: offset: 2990080 length: 4096 flags: O..
168: offset: 3014656 length: 4096 flags: O..
169: offset: 3022848 length: 4096 flags: O..
170: offset: 3047424 length: 4096 flags: O..
171: offset: 3055616 length: 4096 flags: O..
172: offset: 3080192 length: 4096 flags: O..
173: offset: 3088384 length: 4096 flags: O..
174: offset: 3112960 length: 4096 flags: O..
175: offset: 3121152 length: 4096 flags: O..
176: offset: 3145728 length: 4096 flags: O..
177: offset: 3153920 length: 4096 flags: O..
178: offset: 3178496 length: 4096 flags: O..
179: offset: 3186688 length: 4096 flags: O..
180: offset: 3211264 length: 4096 flags: O..
181: offset: 3219456 length: 4096 flags: O..
182: offset: 3244032 length: 4096 flags: O..
183: offset: 3252224 length: 4096 flags: O..
184: offset: 3276800 length: 4096 flags: O..
185: offset: 3284992 length: 4096 flags: O..
186: offset: 3309568 length: 4096 flags: O..
187: offset: 3317760 length: 4096 flags: O..
188: offset: 3342336 length: 4096 flags: O..
189: offset: 3350528 length: 4096 flags: O..
190: offset: 3375104 length: 4096 flags: O..
191: offset: 3383296 length: 4096 flags: O..
192: offset: 3407872 length: 4096 flags: O..
193: offset: 3416064 length: 4096 flags: O..
194: offset: 3440640 length: 4096 flags: O..
195: offset: 3448832 length: 4096 flags: O..
196: offset: 3473408 length: 4096 flags: O..
197: offset: 3481600 length: 4096 flags: O..
198: offset: 3506176 length: 4096 flags: O..
199: offset: 3514368 length: 4096 flags: O..
200: offset: 3538944 length: 4096 flags: O..
201: offset: 3547136 length: 4096 flags: O..
202: offset: 3571712 length: 4096 flags: O..
203: offset: 3579904 length: 4096 flags: O..
204: offset: 3604480 length: 4096 flags: O..
205: offset: 3612672 length: 4096 flags: O..
206: offset: 3637248 length: 4096 flags: O..
207: offset: 3645440 length: 4096 flags: O..
208: offset: 3670016 length: 4096 flags: O..
209: offset: 3678208 length: 4096 flags: O..
210: offset: 3702784 length: 4096 flags: O..
211: offset: 3710976 length: 4096 flags: O..
212: offset: 3735552 length: 4096 flags: O..
213: offset: 3743744 length: 4096 flags: O..
214: offset: 3768320 length: 4096 flags: O..
215: offset: 3776512 length: 4096 flags: O..
216: offset: 3801088 length: 4096 flags: O..
217: offset: 3809280 length: 4096 flags: O..
218: offset: 3833856 length: 4096 flags: O..
219: offset: 3842048 length: 4096 flags: O..
220: offset: 3866624 length: 4096 flags: O..
221: offset: 3874816 length: 4096 flags: O..
222: offset: 3899392 length: 4096 flags: O..
223: offset: 3907584 length: 4096 flags: O..
224: offset: 3932160 length: 4096 flags: O..
225: offset: 3940352 length: 4096 flags: O..
226: offset: 3964928 length: 4096 flags: O..
227: offset: 3973120 length: 4096 flags: O..
228: offset: 3997696 length: 4096 flags: O..
229: offset: 4005888 length: 4096 flags: O..
230: offset: 4030464 length: 4096 flags: O..
231: offset: 4038656 length: 4096 flags: O..
232: offset: 4063232 length: 4096 flags: O..
233: offset: 4071424 length: 4096 flags: O..
234: offset: 4096000 length: 4096 flags: O..
235: offset: 4104192 length: 4096 flags: O..
236: offset: 4128768 length: 4096 flags: O..
237: offset: 4136960 length: 4096 flags: O..
238: offset: 4161536 length: 4096 flags: O..
239: offset: 4169728 length: 4096 flags: O.L
extents: 240
0: offset: 0 length: 1 flags: O..
1: offset: 8 length: 1 flags: O..
2: offset: 16 length: 1 flags: O..
3: offset: 24 length: 1 flags: O..
4: offset: 48 length: 1 flags: O..
5: offset: 56 length: 1 flags: O..
6: offset: 64 length: 1 flags: O..
7: offset: 72 length: 1 flags: O..
8: offset: 80 length: 1 flags: O..
9: offset: 88 length: 1 flags: O..
10: offset: 96 length: 1 flags: O..
11: offset: 104 length: 1 flags: O..
12: offset: 112 length: 1 flags: O..
13: offset: 120 length: 1 flags: O..
14: offset: 176 length: 1 flags: O..
15: offset: 184 length: 1 flags: O..
16: offset: 192 length: 1 flags: O..
17: offset: 200 length: 1 flags: O..
18: offset: 208 length: 1 flags: O..
19: offset: 216 length: 1 flags: O..
20: offset: 224 length: 1 flags: O..
21: offset: 232 length: 1 flags: O..
22: offset: 240 length: 1 flags: O..
23: offset: 248 length: 1 flags: O..
24: offset: 256 length: 1 flags: O..
25: offset: 264 length: 1 flags: O..
26: offset: 272 length: 1 flags: O..
27: offset: 280 length: 1 flags: O..
28: offset: 288 length: 1 flags: O..
29: offset: 296 length: 1 flags: O..
30: offset: 304 length: 1 flags: O..
31: offset: 312 length: 1 flags: O..
32: offset: 320 length: 1 flags: O..
33: offset: 328 length: 1 flags: O..
34: offset: 336 length: 1 flags: O..
35: offset: 344 length: 1 flags: O..
36: offset: 352 length: 1 flags: O..
37: offset: 360 length: 1 flags: O..
38: offset: 368 length: 1 flags: O..
39: offset: 376 length: 1 flags: O..
40: offset: 384 length: 1 flags: O..
41: offset: 392 length: 1 flags: O..
42: offset: 400 length: 1 flags: O..
43: offset: 408 length: 1 flags: O..
44: offset: 416 length: 1 flags: O..
45: offset: 424 length: 1 flags: O..
46: offset: 432 length: 1 flags: O..
47: offset: 440 length: 1 flags: O..
48: offset: 448 length: 1 flags: O..
49: offset: 456 length: 1 flags: O..
50: offset: 464 length: 1 flags: O..
51: offset: 472 length: 1 flags: O..
52: offset: 480 length: 1 flags: O..
53: offset: 488 length: 1 flags: O..
54: offset: 496 length: 1 flags: O..
55: offset: 504 length: 1 flags: O..
56: offset: 512 length: 1 flags: O..
57: offset: 520 length: 1 flags: O..
58: offset: 528 length: 1 flags: O..
59: offset: 536 length: 1 flags: O..
60: offset: 544 length: 1 flags: O..
61: offset: 552 length: 1 flags: O..
62: offset: 560 length: 1 flags: O..
63: offset: 568 length: 1 flags: O..
64: offset: 576 length: 1 flags: O..
65: offset: 584 length: 1 flags: O..
66: offset: 592 length: 1 flags: O..
67: offset: 600 length: 1 flags: O..
68: offset: 608 length: 1 flags: O..
69: offset: 616 length: 1 flags: O..
70: offset: 624 length: 1 flags: O..
71: offset: 632 length: 1 flags: O..
72: offset: 640 length: 1 flags: O..
73: offset: 648 length: 1 flags: O..
74: offset: 656 length: 1 flags: O..
75: offset: 664 length: 1 flags: O..
76: offset: 672 length: 1 flags: O..
77: offset: 680 length: 1 flags: O..
78: offset: 688 length: 1 flags: O..
79: offset: 696 length: 1 flags: O..
80: offset: 704 length: 1 flags: O..
81: offset: 712 length: 1 flags: O..
82: offset: 720 length: 1 flags: O..
83: offset: 728 length: 1 flags: O..
84: offset: 736 length: 1 flags: O..
85: offset: 744 length: 1 flags: O..
86: offset: 752 length: 1 flags: O..
87: offset: 760 length: 1 flags: O..
88: offset: 768 length: 1 flags: O..
89: offset: 776 length: 1 flags: O..
90: offset: 784 length: 1 flags: O..
91: offset: 792 length: 1 flags: O..
92: offset: 800 length: 1 flags: O..
93: offset: 808 length: 1 flags: O..
94: offset: 816 length: 1 flags: O..
95: offset: 824 length: 1 flags: O..
96: offset: 832 length: 1 flags: O..
97: offset: 840 length: 1 flags: O..
98: offset: 848 length: 1 flags: O..
99: offset: 856 length: 1 flags: O..
100: offset: 864 length: 1 flags: O..
101: offset: 872 length: 1 flags: O..
102: offset: 880 length: 1 flags: O..
103: offset: 888 length: 1 flags: O..
104: offset: 896 length: 1 flags: O..
105: offset: 904 length: 1 flags: O..
106: offset: 912 length: 1 flags: O..
107: offset: 920 length: 1 flags: O..
108: offset: 928 length: 1 flags: O..
109: offset: 936 length: 1 flags: O..
110: offset: 944 length: 1 flags: O..
111: offset: 952 length: 1 flags: O..
112: offset: 960 length: 1 flags: O..
113: offset: 968 length: 1 flags: O..
114: offset: 976 length: 1 flags: O..
115: offset: 984 length: 1 flags: O..
116: offset: 992 length: 1 flags: O..
117: offset: 1000 length: 1 flags: O..
118: offset: 1008 length: 1 flags: O..
119: offset: 1016 length: 1 flags: O.L
extents: 120
extents: 0

View File

@@ -1,3 +0,0 @@
== setup
expected 4681
== cleanup

View File

@@ -383,7 +383,7 @@ fi
quo=""
if [ -n "$T_MKFS" ]; then
for i in $(seq -0 $((T_QUORUM - 1))); do
quo="$quo -Q $i,127.0.0.1,$((T_TEST_PORT + i))"
quo="$quo -Q $i,::1,$((T_TEST_PORT + i))"
done
msg "making new filesystem with $T_QUORUM quorum members"

View File

@@ -11,7 +11,6 @@ simple-readdir.sh
get-referring-entries.sh
fallocate.sh
basic-truncate.sh
punch-offline.sh
data-prealloc.sh
setattr_more.sh
offline-extent-waiting.sh
@@ -26,9 +25,7 @@ srch-basic-functionality.sh
simple-xattr-unit.sh
retention-basic.sh
totl-xattr-tag.sh
basic-xattr-indx.sh
quota.sh
totl-merge-read.sh
lock-refleak.sh
lock-shrink-consistency.sh
lock-shrink-read-race.sh
@@ -52,7 +49,6 @@ setup-error-teardown.sh
resize-devices.sh
change-devices.sh
fence-and-reclaim.sh
orphan-log-trees.sh
quorum-heartbeat-timeout.sh
orphan-inodes.sh
mount-unmount-race.sh

View File

@@ -1,143 +0,0 @@
#
# Test basic .indx. xattr tag functionality and index entry lifecycle
#
# An .indx. tagged xattr (scoutfs.hide.indx.<tag>.<major>.<minor>) adds
# the inode to the xattr index at position major.minor, which is queried
# with "scoutfs read-xattr-index".  The boundary cases below show major
# ranges 0-255 and minor is a full u64; the third output column of the
# query ($3 in the awk filters) is the inode number.
#
t_require_commands touch rm setfattr scoutfs stat
t_require_mounts 2
# query index from a specific mount, default mount 0
read_xattr_index()
{
local nr="${1:-0}"
local mnt="$(eval echo \$T_M$nr)"
shift
# sync and drop the weak item cache so the query reads current
# persistent items rather than stale cached state
sync
echo 1 > $(t_debugfs_path $nr)/drop_weak_item_cache
scoutfs read-xattr-index -p "$mnt" "$@"
}
# index position used by most of the cases below
MAJOR=5
MINOR=100
echo "== testing invalid read-xattr-index arguments"
# malformed, out-of-range, and reversed (first > last) position args must fail
scoutfs read-xattr-index -p "$T_M0" bad 2>&1
scoutfs read-xattr-index -p "$T_M0" 1.2 2>&1
scoutfs read-xattr-index -p "$T_M0" 1.2.3 256.0.0 2>&1
scoutfs read-xattr-index -p "$T_M0" 1.2.3 0.0.0 2>&1
scoutfs read-xattr-index -p "$T_M0" 1.2.0 1.1.2 2>&1
scoutfs read-xattr-index -p "$T_M0" 2.2.2 2.2.1 2>&1
echo "== testing invalid names"
touch "$T_D0/invalid"
# missing, empty, non-numeric, negative, and out-of-range major/minor parts
setfattr -n scoutfs.hide.indx.test.$MAJOR "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.. "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test..$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.$MAJOR. "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.256.$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.abc.$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.$MAJOR.abc "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.-1.$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.$MAJOR.-1 "$T_D0/invalid" 2>&1 | t_filter_fs
# 2^64 is one past the largest value and is rejected with ERANGE
setfattr -n scoutfs.hide.indx.test.18446744073709551616.$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
# overly long tag name (240 chars) is rejected
setfattr -n scoutfs.hide.indx.$(printf 'x%.0s' $(seq 1 240)).$MAJOR.$MINOR "$T_D0/invalid" 2>&1 | t_filter_fs
rm -f "$T_D0/invalid"
echo "== testing boundary values"
# smallest (0.0) and largest (255.u64max) valid index positions
touch "$T_D0/boundary"
INO=$(stat -c "%i" "$T_D0/boundary")
setfattr -n scoutfs.hide.indx.test.0.0 "$T_D0/boundary"
read_xattr_index 0 0.0.0 0.0.-1 | awk '($3 == "'$INO'") {print "0.0 found"}'
setfattr -x scoutfs.hide.indx.test.0.0 "$T_D0/boundary"
setfattr -n scoutfs.hide.indx.test.255.18446744073709551615 "$T_D0/boundary"
read_xattr_index 0 255.0.0 255.-1.-1 | awk '($3 == "'$INO'") {print "255.max found"}'
setfattr -x scoutfs.hide.indx.test.255.18446744073709551615 "$T_D0/boundary"
rm -f "$T_D0/boundary"
echo "== indx xattr must have no value"
# even an empty -v value is rejected; only bare names are allowed
touch "$T_D0/noval"
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR -v "" "$T_D0/noval" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR -v 0 "$T_D0/noval" 2>&1 | t_filter_fs
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR -v 1 "$T_D0/noval" 2>&1 | t_filter_fs
rm -f "$T_D0/noval"
echo "== set indx xattr and verify index entry"
touch "$T_D0/file"
INO=$(stat -c "%i" "$T_D0/file")
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/file"
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found"}'
echo "== setting same indx xattr again is a no-op"
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/file"
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found"}'
echo "== removing non-existent indx xattr succeeds"
setfattr -x scoutfs.hide.indx.nonexistent.$MAJOR.999 "$T_D0/file" 2>&1 | t_filter_fs
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "still found"}'
echo "== explicit xattr removal cleans up index entry"
setfattr -x scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/file"
# expect no output: removing the xattr deletes its index entry
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found orphan"}'
rm -f "$T_D0/file"
echo "== file deletion cleans up index entry"
touch "$T_D0/file2"
INO=$(stat -c "%i" "$T_D0/file2")
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/file2"
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found before delete"}'
rm -f "$T_D0/file2"
# expect no output: deletion must also remove the index entry
read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found orphan after delete"}'
echo "== multiple indx xattrs on one file cleaned up by deletion"
touch "$T_D0/file3"
INO=$(stat -c "%i" "$T_D0/file3")
setfattr -n scoutfs.hide.indx.a.$MAJOR.200 "$T_D0/file3"
setfattr -n scoutfs.hide.indx.b.$MAJOR.300 "$T_D0/file3"
BEFORE=$(read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'")' | wc -l)
echo "entries before delete: $BEFORE"
rm -f "$T_D0/file3"
AFTER=$(read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'")' | wc -l)
echo "entries after delete: $AFTER"
echo "== partial removal leaves other entries"
touch "$T_D0/partial"
INO=$(stat -c "%i" "$T_D0/partial")
setfattr -n scoutfs.hide.indx.a.$MAJOR.200 "$T_D0/partial"
setfattr -n scoutfs.hide.indx.b.$MAJOR.300 "$T_D0/partial"
setfattr -x scoutfs.hide.indx.a.$MAJOR.200 "$T_D0/partial"
# only the .200 entry goes away; the .300 entry must survive
read_xattr_index 0 $MAJOR.200.0 $MAJOR.200.-1 | awk '($3 == "'$INO'") {print "200 found"}'
read_xattr_index 0 $MAJOR.300.0 $MAJOR.300.-1 | awk '($3 == "'$INO'") {print "300 found"}'
rm -f "$T_D0/partial"
echo "== multiple files at same index position"
touch "$T_D0/multi_a" "$T_D0/multi_b"
INO_A=$(stat -c "%i" "$T_D0/multi_a")
INO_B=$(stat -c "%i" "$T_D0/multi_b")
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/multi_a"
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/multi_b"
COUNT=$(read_xattr_index 0 $MAJOR.$MINOR.0 $MAJOR.$MINOR.-1 | wc -l)
echo "files at same position: $COUNT"
rm -f "$T_D0/multi_a"
# deleting one file must not disturb the other file's entry
read_xattr_index 0 $MAJOR.$MINOR.0 $MAJOR.$MINOR.-1 | awk '($3 == "'$INO_A'") {print "deleted file still found"}'
read_xattr_index 0 $MAJOR.$MINOR.0 $MAJOR.$MINOR.-1 | awk '($3 == "'$INO_B'") {print "surviving file found"}'
rm -f "$T_D0/multi_b"
echo "== cross-mount visibility"
# entries created through mount 0 must be visible when queried via mount 1
touch "$T_D0/file4"
INO=$(stat -c "%i" "$T_D0/file4")
setfattr -n scoutfs.hide.indx.test.$MAJOR.$MINOR "$T_D0/file4"
read_xattr_index 1 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found on mount 1"}'
rm -f "$T_D0/file4"
read_xattr_index 1 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'") {print "found orphan on mount 1"}'
echo "== duplicate position deduplication"
# two differently-named tags at the same major.minor yield one index entry
touch "$T_D0/file5"
INO=$(stat -c "%i" "$T_D0/file5")
setfattr -n scoutfs.hide.indx.aa.$MAJOR.$MINOR "$T_D0/file5"
setfattr -n scoutfs.hide.indx.bb.$MAJOR.$MINOR "$T_D0/file5"
COUNT=$(read_xattr_index 0 $MAJOR.0.0 $MAJOR.-1.-1 | awk '($3 == "'$INO'")' | wc -l)
echo "entries for same position: $COUNT"
rm -f "$T_D0/file5"
t_pass

View File

@@ -53,6 +53,14 @@ exec {FD1}>&- # close
exec {FD2}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== remote unopened unlink deletes"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
rm -f "$T_D1/file"
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
# Hurry along the orphan scanners. If any are currently asleep, we will
# have to wait at least their current scan interval before they wake up,
# run, and notice their new interval.
@@ -60,19 +68,6 @@ t_save_all_sysfs_mount_options orphan_scan_delay_ms
t_set_all_sysfs_mount_options orphan_scan_delay_ms 500
t_wait_for_orphan_scan_runs
echo "== remote unopened unlink deletes"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
rm -f "$T_D1/file"
# cross-mount deletion falls back to the orphan scanner when the
# creating mount still has the inode cached, wait for it to complete
t_force_log_merge
# wait for orphan scanners to pick up the unlinked inode and become idle
t_wait_for_no_orphans
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
echo "== unlink wait for open on other mount"
echo "contents" > "$T_D0/badfile"
ino=$(stat -c "%i" "$T_D0/badfile")
@@ -86,6 +81,7 @@ exec {FD}>&- # close
# we know that revalidating will unhash the remote dentry
stat "$T_D0/badfile" 2>&1 | sed 's/cannot statx/cannot stat/' | t_filter_fs
t_force_log_merge
# wait for orphan scanners to pick up the unlinked inode and become idle
t_wait_for_no_orphans
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"

View File

@@ -1,52 +0,0 @@
#
# Test that orphaned log_trees entries from unmounted rids are
# finalized and merged.
#
# An orphan log_trees entry is one whose rid has no mounted_clients
# entry. This can happen from incomplete reclaim across server
# failovers. We simulate it with the reclaim_skip_finalize trigger
# which makes reclaim_open_log_tree skip the finalization step.
#
t_require_commands touch scoutfs
t_require_mounts 2
TIMEOUT=90
echo "== create orphan log_trees entry via trigger"
sv=$(t_server_nr)
cl=$(t_first_client_nr)
rid=$(t_mount_rid $cl)
touch "$T_D0/file" "$T_D1/file"
sync
# arm the trigger so reclaim skips finalization
t_trigger_arm_silent reclaim_skip_finalize $sv
# force unmount the client, server will fence and reclaim it
# but the trigger makes reclaim leave log_trees unfinalized
t_force_umount $cl
# wait for fencing to run
verify_fenced() {
grep -q "running rid '$rid'" "$T_FENCED_LOG" 2>/dev/null
}
t_wait_until_timeout $TIMEOUT verify_fenced
# give the server time to complete reclaim after fence
sleep 5
# remount the client so t_force_log_merge can sync all mounts.
# the client gets a new rid; the old rid's log_trees is the orphan.
t_mount $cl
echo "== verify orphan is reclaimed and merge completes"
t_force_log_merge
echo "== verify orphan reclaim was logged"
if ! dmesg | grep -q "reclaiming orphan log trees for rid $rid"; then
t_fail "expected orphan reclaim message for rid $rid in dmesg"
fi
t_pass

View File

@@ -1,152 +0,0 @@
#
# Exercise the scoutfs punch-offline command: argument validation,
# refusal to punch online/unwritten extents or non-aligned ranges,
# data_version checking, and hole punching across many offline
# extent layouts.  Output is compared against golden output, so
# failing commands are expected to print their errors.
#
t_require_commands scoutfs dd fallocate
FILE="$T_D0/file"
DIR="$T_D0/dir"
echo "== missing options should fail =="
rm -rf $DIR && mkdir -p $DIR
scoutfs punch-offline $DIR -l 4096 -V 0
scoutfs punch-offline $DIR -o 0 -V 0
scoutfs punch-offline $DIR -o 0 -l 4096
echo "== can't hole punch dir or special =="
rm -rf $DIR && mkdir -p $DIR
scoutfs punch-offline $DIR -o 0 -l 4096 -V 0
echo "== punching an empty file does nothing =="
rm -f $FILE && touch $FILE
scoutfs punch-offline $FILE -o 0 -l 4096 -V 0
echo "== punch outside of i_size does nothing =="
dd if=/dev/zero of=$FILE bs=4096 count=1 status=none
scoutfs punch-offline $FILE -o 4096 -l 4096 -V 1
echo "== can't hole punch online extent =="
scoutfs get-fiemap -Lb $FILE
scoutfs punch-offline $FILE -o 0 -l 4096 -V 1
scoutfs get-fiemap -Lb $FILE
echo "== can't hole punch unwritten extent =="
rm -rf $FILE && touch $FILE
fallocate -l $((4096 * 3)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs get-fiemap -Lb $FILE
scoutfs punch-offline $FILE -o 4096 -l 4096 -V $vers
scoutfs get-fiemap -Lb $FILE
echo "== hole punch offline extent =="
# releasing the file turns its extents offline, which is the only
# state punch-offline is allowed to punch
rm -rf $FILE && touch $FILE
fallocate -l $((4096 * 3)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -Lb $FILE
scoutfs punch-offline $FILE -o 4096 -l 4096 -V $vers
scoutfs get-fiemap -Lb $FILE
echo "== can't hole punch non-aligned bsz offset or len =="
rm -rf $FILE && touch $FILE
fallocate -l $((4096 * 3)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -Lb $FILE
scoutfs punch-offline $FILE -o 4095 -l 4096 -V $vers
scoutfs punch-offline $FILE -o 1 -l 4096 -V $vers
scoutfs punch-offline $FILE -o 4096 -l 409700 -V $vers
scoutfs punch-offline $FILE -o 4096 -l 4097 -V $vers
scoutfs punch-offline $FILE -o 4096 -l 4095 -V $vers
scoutfs punch-offline $FILE -o 4096 -l 1 -V $vers
scoutfs punch-offline $FILE -o 4096 -l 0 -V $vers
scoutfs get-fiemap -Lb $FILE
echo "== can't hole punch mismatched data_version =="
rm -rf $FILE && touch $FILE
fallocate -l $((4096 * 3)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -Lb $FILE
scoutfs punch-offline $FILE -o 4096 -l 4096 -V 0
scoutfs punch-offline $FILE -o 4096 -l 4096 -V 2
scoutfs punch-offline $FILE -o 4096 -l 4096 -V 9999
scoutfs get-fiemap -Lb $FILE
echo "== Punch hole crossing multiple extents =="
rm -rf $FILE && touch $FILE
fallocate -l $((7 * 4096)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -L $FILE
scoutfs punch-offline $FILE -o $((1 * 4096)) -l 4096 -V $vers
scoutfs punch-offline $FILE -o $((3 * 4096)) -l 4096 -V $vers
scoutfs punch-offline $FILE -o $((5 * 4096)) -l 4096 -V $vers
# 0.1.2.3
scoutfs get-fiemap -L $FILE
scoutfs punch-offline $FILE -o $((2 * 4096)) -l $((3 * 4096)) -V $vers
# 0.....1
scoutfs get-fiemap -L $FILE
echo "== punch hole starting at a hole =="
rm -rf $FILE && touch $FILE
fallocate -l $((7 * 4096)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -L $FILE
scoutfs punch-offline $FILE -o $((1 * 4096)) -l 4096 -V $vers
scoutfs punch-offline $FILE -o $((3 * 4096)) -l 4096 -V $vers
scoutfs punch-offline $FILE -o $((5 * 4096)) -l 4096 -V $vers
# 0.1.2.3
scoutfs get-fiemap -L $FILE
scoutfs punch-offline $FILE -o $((1 * 4096)) -l $((5 * 4096)) -V $vers
# 0.....1
scoutfs get-fiemap -L $FILE
echo "== large punch =="
rm -rf $FILE && touch $FILE
fallocate -l $((6 * 1024 * 1024 * 1024)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
scoutfs release $FILE --data-version $vers
scoutfs get-fiemap -L $FILE
scoutfs punch-offline $FILE -o $((134123 * 4096)) -l $((68343 * 4096)) -V $vers
scoutfs punch-offline $FILE -o $((467273 * 4096)) -l $((68343 * 4096)) -V $vers
scoutfs punch-offline $FILE -o $((734623 * 4096)) -l $((68343 * 4096)) -V $vers
scoutfs get-fiemap -L $FILE
echo "== overlapping punches with lots of extents =="
rm -rf $FILE && touch $FILE
fallocate -l $((4096 * 1024)) $FILE
vers=$(scoutfs stat -s data_version "$FILE")
# NOTE(review): releases with literal data-version 1 rather than $vers --
# presumably equal for a freshly created file; confirm
scoutfs release $FILE --data-version 1
scoutfs get-fiemap -Lb $FILE
# punch odd ones away
for h in $(seq 1 2 1023); do
	scoutfs punch-offline $FILE -o $((h * 4096)) -l 4096 -V $vers
done
scoutfs get-fiemap -Lb $FILE | tail -n 1
# punch a large hole from 32 to 55, removing 7 extents
scoutfs punch-offline $FILE -o $((32 * 4096)) -l $((13 * 4096)) -V $vers
scoutfs get-fiemap -Lb $FILE | tail -n 1
# punch every 8th @6
for h in $(seq 6 8 1024); do
	scoutfs punch-offline $FILE -o $((h * 4096)) -l 4096 -V $vers
done
# again @4
scoutfs get-fiemap -Lb $FILE | tail -n 1
for h in $(seq 4 8 1024); do
	scoutfs punch-offline $FILE -o $((h * 4096)) -l 4096 -V $vers
done
scoutfs get-fiemap -Lb $FILE | tail -n 1
# punching a large hole from 127 to 175, removing 12 extents
scoutfs punch-offline $FILE -o $((127 * 4096)) -l $((48 * 4096)) -V $vers
scoutfs get-fiemap -Lb $FILE
# again @2
for h in $(seq 2 8 1024); do
	scoutfs punch-offline $FILE -o $((h * 4096)) -l 4096 -V $vers
done
scoutfs get-fiemap -L $FILE
# and again @0, punching away everything remaining extent
for h in $(seq 0 8 1024); do
	scoutfs punch-offline $FILE -o $((h * 4096)) -l 4096 -V $vers
done
scoutfs get-fiemap -Lb $FILE
t_pass

View File

@@ -32,7 +32,7 @@ echo "== dirs shouldn't appear in data_seq queries"
mkdir "$DIR"
ino=$(stat -c "%i" "$DIR")
t_sync_seq_index
query_index data_seq | awk '($4 == "'$ino'")'
query_index data_seq | grep "$ino\>"
echo "== two created files are present and come after each other"
touch "$DIR/first"
@@ -92,13 +92,13 @@ test "$before" -lt "$after" || \
# didn't skip past deleted dirty items
#
echo "== make sure dirtying doesn't livelock walk"
dd if=/dev/urandom of="$DIR/dirtying" bs=4K count=1 >> "$T_TMPDIR/seqres.full" 2>&1
dd if=/dev/urandom of="$DIR/dirtying" bs=4K count=1 >> $seqres.full 2>&1
nr=1
while [ "$nr" -lt 100 ]; do
echo "dirty/walk attempt $nr" >> "$T_TMPDIR/seqres.full"
echo "dirty/walk attempt $nr" >> $seqres.full
sync
dd if=/dev/urandom of="$DIR/dirtying" bs=4K count=1 conv=notrunc \
>> "$T_TMPDIR/seqres.full" 2>&1
>> $seqres.full 2>&1
scoutfs walk-inodes data_seq 0 -1 $DIR/dirtying >& /dev/null
((nr++))
done

View File

@@ -12,12 +12,12 @@ create_file() {
if [ "$blocks" != 0 ]; then
dd if=/dev/urandom bs=4096 count=$blocks of="$file" \
>> "$T_TMPDIR/seqres.full" 2>&1
>> $seqres.full 2>&1
fi
if [ "$remainder" != 0 ]; then
dd if=/dev/urandom bs="$remainder" count=1 of="$file" \
conv=notrunc oflag=append >> "$T_TMPDIR/seqres.full" 2>&1
conv=notrunc oflag=append >> $seqres.full 2>&1
fi
}
@@ -78,7 +78,7 @@ create_file "$FILE" $((4096 * 1024))
cp "$FILE" "$T_TMP"
nr=1
while [ "$nr" -lt 10 ]; do
echo "attempt $nr" >> "$T_TMPDIR/$seqres.full" 2>&1
echo "attempt $nr" >> $seqres.full 2>&1
release_vers "$FILE" stat 0 4096K
sync
echo 3 > /proc/sys/vm/drop_caches

View File

@@ -1,50 +0,0 @@
#
# Test that merge_read_item() correctly updates the sequence number when
# combining delta items from multiple finalized log trees. Each mount
# sets a totl value in its own 3-bit lane (powers of 8) so that any
# double-counting overflows the lane and is caught by: or(v, exp) != exp.
#
t_require_commands setfattr scoutfs
t_require_mounts 5
echo "== setup"
# create 2500 files per mount so every mount contributes delta items
for nr in $(t_fs_nrs); do
	d=$(eval echo \$T_D$nr)
	for i in $(seq 1 2500); do : > "$d/f$nr$i"; done
done
sync
t_force_log_merge
# per-mount lane values: powers of 8 keep each mount's contribution in
# its own 3 bits; the correct total for 5 mounts is 1+8+64+512+4096=4681
vals=(1 8 64 512 4096)
expected=4681
n=0
for nr in $(t_fs_nrs); do
	d=$(eval echo \$T_D$nr)
	v=${vals[$((n++))]}
	for i in $(seq 1 2500); do
		setfattr -n "scoutfs.totl.t.$i.0.0" -v $v "$d/f$nr$i"
	done
done
# force the server to stop the merge part way so readers see a mix of
# merged and unmerged finalized log trees
t_trigger_arm_silent log_merge_force_partial $(t_server_nr)
# background readers on every mount: drop the weak item cache and re-read
# the totals, recording any value whose lanes don't match the expected sum
bad="$T_TMPDIR/bad"
for nr in $(t_fs_nrs); do
	( while true; do
		echo 1 > "$(t_debugfs_path $nr)/drop_weak_item_cache"
		scoutfs read-xattr-totals -p "$(eval echo \$T_M$nr)" | \
			awk -F'[ =,]+' -v e=$expected 'or($2+0,e) != e'
	done ) >> "$bad" &
done
echo "expected $expected"
t_force_log_merge
t_silent_kill $(jobs -p)
# any captured output means a reader saw a double-counted total
test -s "$bad" && echo "double-counted:" && cat "$bad"
echo "== cleanup"
for nr in $(t_fs_nrs); do
	find "$(eval echo \$T_D$nr)" -name "f$nr*" -delete
done
t_pass

View File

@@ -160,15 +160,16 @@ int parse_timespec(char *str, struct timespec *ts)
* Parse a quorum slot specification string "NR,ADDR,PORT" into its
* component parts. We use sscanf to both parse the leading NR and
* trailing PORT integers, and to pull out the inner ADDR string which
* is then parsed to make sure that it's a valid unicast ipv4 address.
* is then parsed to make sure that it's a valid unicast ip address.
* We require that all components be specified, and sscanf will check
* this by the number of matches it returns.
*/
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
{
#define ADDR_CHARS 45 /* max ipv6 */
char addr[ADDR_CHARS + 1] = {'\0',};
#define ADDR_CHARS 45 /* (INET6_ADDRSTRLEN - 1) */
char addr[INET6_ADDRSTRLEN] = {'\0',};
struct in_addr in;
struct in6_addr in6;
int port;
int parsed;
int nr;
@@ -206,15 +207,25 @@ int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
return -EINVAL;
}
if (inet_aton(addr, &in) == 0 || htonl(in.s_addr) == 0 ||
htonl(in.s_addr) == UINT_MAX) {
printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
addr, arg);
return -EINVAL;
if (inet_pton(AF_INET, addr, &in) == 1) {
if (htonl(in.s_addr) == 0 || htonl(in.s_addr) == UINT_MAX) {
printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
addr, arg);
return -EINVAL;
}
slot->addr.v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
slot->addr.v4.addr = cpu_to_le32(htonl(in.s_addr));
slot->addr.v4.port = cpu_to_le16(port);
} else if (inet_pton(AF_INET6, addr, &in6) == 1) {
if (IN6_IS_ADDR_UNSPECIFIED(&in6) || IN6_IS_ADDR_MULTICAST(&in6)) {
printf("invalid ipv6 address '%s' in quorum slot '%s'\n",
addr, arg);
return -EINVAL;
}
slot->addr.v6.family = cpu_to_le16(SCOUTFS_AF_IPV6);
memcpy(slot->addr.v6.addr, &in6, 16);
slot->addr.v6.port = cpu_to_le16(port);
}
slot->addr.v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
slot->addr.v4.addr = cpu_to_le32(htonl(in.s_addr));
slot->addr.v4.port = cpu_to_le16(port);
return nr;
}

View File

@@ -28,6 +28,7 @@
#include "srch.h"
#include "leaf_item_hash.h"
#include "dev.h"
#include "quorum.h"
static void print_block_header(struct scoutfs_block_header *hdr, int size)
{
@@ -400,12 +401,20 @@ static int print_mounted_client_entry(struct scoutfs_key *key, u64 seq, u8 flags
{
struct scoutfs_mounted_client_btree_val *mcv = val;
struct in_addr in;
char ip6addr[INET6_ADDRSTRLEN];
memset(&in, 0, sizeof(in));
in.s_addr = htonl(le32_to_cpu(mcv->addr.v4.addr));
if (mcv->addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
in.s_addr = htonl(le32_to_cpu(mcv->addr.v4.addr));
printf(" rid %016llx ipv4_addr %s flags 0x%x\n",
le64_to_cpu(key->skmc_rid), inet_ntoa(in), mcv->flags);
printf(" rid %016llx ipv4_addr %s flags 0x%x\n",
le64_to_cpu(key->skmc_rid), inet_ntoa(in), mcv->flags);
} else if (mcv->addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
printf(" rid %016llx ipv6_addr %s flags 0x%x\n",
le64_to_cpu(key->skmc_rid),
inet_ntop(AF_INET6, mcv->addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
mcv->flags);
}
return 0;
}
@@ -891,26 +900,40 @@ static int print_btree_leaf_items(int fd, struct scoutfs_super_block *super,
static char *alloc_addr_str(union scoutfs_inet_addr *ia)
{
struct in_addr addr;
char ip6addr[INET6_ADDRSTRLEN];
char *quad;
char *str;
int len;
memset(&addr, 0, sizeof(addr));
addr.s_addr = htonl(le32_to_cpu(ia->v4.addr));
quad = inet_ntoa(addr);
if (quad == NULL)
return NULL;
if (le16_to_cpu(ia->v4.family) == SCOUTFS_AF_IPV4) {
memset(&addr, 0, sizeof(addr));
addr.s_addr = htonl(le32_to_cpu(ia->v4.addr));
quad = inet_ntoa(addr);
if (quad == NULL)
return NULL;
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->v4.port));
if (len < 1 || len > 22)
return NULL;
len = snprintf(NULL, 0, "%s:%u", quad, le16_to_cpu(ia->v4.port));
if (len < 1 || len > 22)
return NULL;
len++; /* null */
str = malloc(len);
if (!str)
return NULL;
len++; /* null */
str = malloc(len);
if (!str)
return NULL;
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->v4.port));
snprintf(str, len, "%s:%u", quad, le16_to_cpu(ia->v4.port));
} else if (le16_to_cpu(ia->v6.family) == SCOUTFS_AF_IPV6) {
if (inet_ntop(AF_INET6, ia->v6.addr, ip6addr, INET6_ADDRSTRLEN) == NULL)
return NULL;
len = strlen(ip6addr) + 9; /* "[]:\0" (4) plus max strlen(u16) (5) */
str = malloc(len);
if (!str)
return NULL;
snprintf(str, len, "[%s]:%u", ip6addr, le16_to_cpu(ia->v6.port));
} else
return NULL;
return str;
}
@@ -1026,7 +1049,7 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
printf(" quorum config version %llu\n",
le64_to_cpu(super->qconf.version));
for (i = 0; i < array_size(super->qconf.slots); i++) {
if (super->qconf.slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
if (!quorum_slot_present(super, i))
continue;
addr = alloc_addr_str(&super->qconf.slots[i].addr);

View File

@@ -1,127 +0,0 @@
#include <sys/ioctl.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <argp.h>
#include "sparse.h"
#include "parse.h"
#include "util.h"
#include "ioctl.h"
#include "cmd.h"
/*
 * Parsed punch-offline command line state.  The *_set bit flags record
 * which required options were actually supplied so the parser can
 * reject incomplete invocations at ARGP_KEY_FINI.
 */
struct po_args {
	char *path;		/* file to punch a hole in */
	u64 offset;		/* byte offset of the hole */
	u64 length;		/* byte length of the hole */
	u64 data_version;	/* expected data_version of the file */
	unsigned offset_set:1,
		length_set:1,
		data_version_set:1;
};
static int do_punch_offline(struct po_args *args)
{
struct scoutfs_ioctl_punch_offline ioctl_args;
int ret;
int fd;
fd = get_path(args->path, O_RDWR);
if (fd < 0)
return fd;
ioctl_args.offset = args->offset;
ioctl_args.len = args->length;
ioctl_args.data_version = args->data_version;
ioctl_args.flags = 0;
ret = ioctl(fd, SCOUTFS_IOC_PUNCH_OFFLINE, &ioctl_args);
if (ret < 0) {
ret = -errno;
fprintf(stderr, "punch_offline ioctl failed: %s (%d)\n",
strerror(errno), errno);
}
close(fd);
return ret;
}
/*
 * argp parser callback for punch-offline.
 *
 * Fills in the struct po_args handed to us through state->input.
 * Numeric options propagate their parser's error code on bad input;
 * the presence of all required options is enforced once parsing
 * finishes (ARGP_KEY_FINI) via argp_error(), which does not return.
 */
static int parse_opt(int key, char *arg, struct argp_state *state)
{
	struct po_args *args = state->input;
	int err;

	switch (key) {
	case 'V':
		err = parse_u64(arg, &args->data_version);
		if (err)
			return err;
		args->data_version_set = 1;
		break;
	case 'o': /* byte offset of the hole */
		err = parse_human(arg, &args->offset);
		if (err)
			return err;
		args->offset_set = 1;
		break;
	case 'l': /* byte length of the hole */
		err = parse_human(arg, &args->length);
		if (err)
			return err;
		args->length_set = 1;
		break;
	case ARGP_KEY_ARG:
		if (args->path)
			argp_error(state, "unknown extra argument given");
		else
			args->path = strdup_or_error(state, arg);
		break;
	case ARGP_KEY_FINI:
		/* every option and the path argument are required */
		if (!args->path)
			argp_error(state, "must provide path to file");
		if (!args->offset_set)
			argp_error(state, "must provide offset");
		if (!args->length_set)
			argp_error(state, "must provide length");
		if (!args->data_version_set)
			argp_error(state, "must provide data_version");
		break;
	default:
		break;
	}

	return 0;
}
/* command line options accepted by punch-offline; all are required */
static struct argp_option options[] = {
	{ "data-version", 'V', "VERSION", 0, "Data version of the file [Required]"},
	{ "offset", 'o', "OFFSET", 0, "Offset (bytes or KMGTP units) in file to stage [Required]"},
	{ "length", 'l', "LENGTH", 0, "Length of range (bytes or KMGTP units) of file to stage. [Required]"},
	{ NULL }
};

/* argp description that wires the option table to parse_opt */
static struct argp argp = {
	options,
	parse_opt,
	"PATH",
	"Make a (sparse) hole in the file at offset and with length"
};
/*
 * Entry point for the punch-offline sub-command: parse the command
 * line into po_args and hand it to do_punch_offline().
 */
static int punch_offline_cmd(int argc, char **argv)
{
	struct po_args po_args = {NULL};
	int err = argp_parse(&argp, argc, argv, 0, NULL, &po_args);

	if (err)
		return err;

	return do_punch_offline(&po_args);
}
/* register the punch-offline sub-command with the cmd table at startup */
static void __attribute__((constructor)) punch_offline_ctor(void)
{
	cmd_register_argp("punch-offline", &argp, GROUP_AGENT, punch_offline_cmd);
}

View File

@@ -10,7 +10,8 @@
bool quorum_slot_present(struct scoutfs_super_block *super, int i)
{
return super->qconf.slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4);
return ((super->qconf.slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) ||
(super->qconf.slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)));
}
bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
@@ -18,35 +19,40 @@ bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
struct in_addr in;
bool valid = true;
char *addr;
char ip6addr[INET6_ADDRSTRLEN];
int i;
int j;
for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_NONE))
continue;
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4)) {
if (slots[i].addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
slots[i].addr.v4.port == slots[j].addr.v4.port) {
in.s_addr =
htonl(le32_to_cpu(slots[i].addr.v4.addr));
addr = inet_ntoa(in);
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
i, j, addr,
le16_to_cpu(slots[i].addr.v4.port));
valid = false;
}
}
} else if (slots[i].addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if ((IN6_ARE_ADDR_EQUAL(slots[i].addr.v6.addr, slots[j].addr.v6.addr)) &&
(slots[i].addr.v6.port == slots[j].addr.v6.port)) {
fprintf(stderr, "quorum slot nr %u and %u have the same address [%s]:%u\n",
i, j,
inet_ntop(AF_INET6, slots[i].addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
le16_to_cpu(slots[i].addr.v6.port));
valid = false;
}
}
} else if (slots[i].addr.v6.family != cpu_to_le16(SCOUTFS_AF_NONE)) {
fprintf(stderr, "quorum slot nr %u has invalid family %u\n",
i, le16_to_cpu(slots[i].addr.v4.family));
valid = false;
}
for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
if (slots[i].addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
continue;
if (slots[i].addr.v4.addr == slots[j].addr.v4.addr &&
slots[i].addr.v4.port == slots[j].addr.v4.port) {
in.s_addr =
htonl(le32_to_cpu(slots[i].addr.v4.addr));
addr = inet_ntoa(in);
fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
i, j, addr,
le16_to_cpu(slots[i].addr.v4.port));
valid = false;
}
}
}
return valid;
@@ -61,19 +67,23 @@ void print_quorum_slots(struct scoutfs_quorum_slot *slots, int nr, char *indent)
{
struct scoutfs_quorum_slot *sl;
struct in_addr in;
char ip6addr[INET6_ADDRSTRLEN];
bool first = true;
int i;
for (i = 0, sl = slots; i < SCOUTFS_QUORUM_MAX_SLOTS; i++, sl++) {
if (sl->addr.v4.family == cpu_to_le16(SCOUTFS_AF_IPV4)) {
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
printf("%s%u: %s:%u\n", first ? "" : indent,
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
if (sl->addr.v4.family != cpu_to_le16(SCOUTFS_AF_IPV4))
continue;
in.s_addr = htonl(le32_to_cpu(sl->addr.v4.addr));
printf("%s%u: %s:%u\n", first ? "" : indent,
i, inet_ntoa(in), le16_to_cpu(sl->addr.v4.port));
first = false;
first = false;
} else if (sl->addr.v6.family == cpu_to_le16(SCOUTFS_AF_IPV6)) {
printf("%s%u: [%s]:%u\n", first ? "" : indent, i,
inet_ntop(AF_INET6, sl->addr.v6.addr, ip6addr, INET6_ADDRSTRLEN),
le16_to_cpu(sl->addr.v6.port));
first = false;
}
}
}