Compare commits


1 Commit

Author: Zach Brown
SHA1: 8ee41caa24
Date: 2021-03-31 10:55:18 -07:00

scoutfs: introduce CodingStyle.txt

Add a coding style document that tries to record the conventions that
the project uses. It seemed more appropriate to put it up in the -kmod
git repo context rather than in src/ which would end up in fs/scoutfs/
upstream.

Signed-off-by: Zach Brown <zab@versity.com>
48 changed files with 564 additions and 2709 deletions

CodingStyle.txt (new file)
View File

@@ -0,0 +1,82 @@
We try to maintain a consistent coding style across the project. It's
admittedly arbitrary and starts from upstream's
Documentation/CodingStyle. Conventions are added here as they come up
during review. We'll demonstrate each stylistic preference with a diff
snippet.

== Try to make one exit point for reasonably long functions

 {
-	void *a;
-	void *b;
+	void *a = NULL;
+	void *b = NULL;
+	int ret;

 	a = kalloc();
-	if (!a)
-		return 1;
+	if (!a) {
+		ret = 1;
+		goto out;
+	}

 	b = kalloc();
 	if (!b) {
-		kfree(a);
-		return 2;
+		ret = 2;
+		goto out;
 	}

-	return 3;
+	ret = 3;
+out:
+	kfree(a);
+	kfree(b);
+	return ret;
 }

The idea is to initialize all state at the top of the function,
modifying it throughout, and clean it all up at the end. Having one
exit point also gives us a place to add tracing of function exit.
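
For reference, a minimal sketch of the full function after the diff is
applied (the name example() is illustrative, and kalloc() is the
document's stand-in allocator; this assumes kfree(NULL) is a no-op, as
it is for the kernel's kfree()):

	int example(void)
	{
		void *a = NULL;
		void *b = NULL;
		int ret;

		a = kalloc();
		if (!a) {
			ret = 1;
			goto out;
		}

		b = kalloc();
		if (!b) {
			ret = 2;
			goto out;
		}

		ret = 3;
	out:
		/* unconditional cleanup of everything initialized above */
		kfree(a);
		kfree(b);
		return ret;
	}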

== Multiple declarations on a line

-	int i, j;
+	int i;
+	int j;

Declare function variables one per line. The verbose declarations
create pressure to think about excessive stack use or over-long
functions, make initializers clear, and leave room for comments.
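
A short hypothetical example of the room this leaves for initializers
and comments (the names here are illustrative):

	struct scoutfs_lock *lock = NULL;	/* set once the lock is granted */
	u64 ind_seq;				/* index seq for this trans hold */
	int ret;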

== Balance braces

-	if (IS_ERR(super_block))
+	if (IS_ERR(super_block)) {
 		return PTR_ERR(super_block);
-	else {
+	} else {
 		*super_res = *super_block;
 		kfree(super_block);
 		return 0;
 	}

*nervous twitch*

== Cute variable definition waterfalls

+	struct block_device *meta_bdev;
 	struct scoutfs_sb_info *sbi;
 	struct mount_options opts;
-	struct block_device *meta_bdev;
 	struct inode *inode;

This isn't strictly necessary, but it's nice to try and make a pretty
descending waterfall of variable declaration lengths. It often has the
accidental effect of sorting definitions by decreasing complexity. I
tend to group types when the name lengths are pretty close, even if
they're not strictly sorted, so that all the ints, u64s, keys, etc.,
are all together.

View File

@@ -27,11 +27,9 @@ scoutfs-y += \
lock_server.o \
msg.o \
net.o \
omap.o \
options.o \
per_task.o \
quorum.o \
recov.o \
scoutfs_trace.o \
server.o \
sort_priv.o \

View File

@@ -128,7 +128,6 @@ static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
{
struct block_private *bp;
unsigned int noio_flags;
/*
* If we had multiple blocks per page we'd need to be a little
@@ -148,19 +147,8 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
set_bit(BLOCK_BIT_PAGE_ALLOC, &bp->bits);
bp->bl.data = page_address(bp->page);
} else {
/*
* __vmalloc doesn't pass the gfp flags down to pte
* allocs, they're done with user alloc flags.
* Unfortunately, some lockdep doesn't know that
* PF_NOMEMALLOC prevents __GFP_FS reclaim and generates
* spurious reclaim-on dependencies and warnings.
*/
lockdep_off();
noio_flags = memalloc_noio_save();
bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_noio_restore(noio_flags);
lockdep_on();
bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE,
GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
if (!bp->virt) {
kfree(bp);
bp = NULL;
@@ -298,16 +286,10 @@ static int block_insert(struct super_block *sb, struct block_private *bp)
WARN_ON_ONCE(atomic_read(&bp->refcount) & BLOCK_REF_INSERTED);
retry:
atomic_add(BLOCK_REF_INSERTED, &bp->refcount);
ret = rhashtable_lookup_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
ret = rhashtable_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
if (ret < 0) {
atomic_sub(BLOCK_REF_INSERTED, &bp->refcount);
if (ret == -EBUSY) {
/* wait for pending rebalance to finish */
synchronize_rcu();
goto retry;
}
} else {
atomic_inc(&binf->total_inserted);
TRACE_BLOCK(insert, bp);
@@ -414,7 +396,6 @@ static void block_remove_all(struct super_block *sb)
if (block_get_if_inserted(bp)) {
block_remove(sb, bp);
WARN_ON_ONCE(atomic_read(&bp->refcount) != 1);
block_put(sb, bp);
}
}
@@ -1092,11 +1073,10 @@ restart:
if (bp == NULL)
break;
if (bp == ERR_PTR(-EAGAIN)) {
/* hard exit to wait for rcu rebalance to finish */
/* hard reset to not hold rcu grace period across retries */
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
scoutfs_inc_counter(sb, block_cache_shrink_restart);
synchronize_rcu();
goto restart;
}
@@ -1257,7 +1237,7 @@ out:
if (ret)
scoutfs_block_destroy(sb);
return ret;
return 0;
}
void scoutfs_block_destroy(struct super_block *sb)

View File

@@ -31,7 +31,6 @@
#include "net.h"
#include "endian_swap.h"
#include "quorum.h"
#include "omap.h"
/*
* The client is responsible for maintaining a connection to the server.
@@ -151,7 +150,7 @@ static int client_lock_response(struct super_block *sb,
void *resp, unsigned int resp_len,
int error, void *data)
{
if (resp_len != sizeof(struct scoutfs_net_lock))
if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
return -EINVAL;
/* XXX error? */
@@ -216,39 +215,6 @@ int scoutfs_client_srch_commit_compact(struct super_block *sb,
res, sizeof(*res), NULL, 0);
}
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map *map)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
return scoutfs_net_response(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
id, 0, map, sizeof(*map));
}
/* The client is receiving an omap request from the server */
static int client_open_ino_map(struct super_block *sb, struct scoutfs_net_connection *conn,
u8 cmd, u64 id, void *arg, u16 arg_len)
{
if (arg_len != sizeof(struct scoutfs_open_ino_map_args))
return -EINVAL;
return scoutfs_omap_client_handle_request(sb, id, arg);
}
/* The client is sending an omap request to the server */
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
struct scoutfs_open_ino_map *map)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
struct scoutfs_open_ino_map_args args = {
.group_nr = cpu_to_le64(group_nr),
.req_id = 0,
};
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
&args, sizeof(args), map, sizeof(*map));
}
/* The client is receiving an invalidation request from the server */
static int client_lock(struct super_block *sb,
struct scoutfs_net_connection *conn, u8 cmd, u64 id,
@@ -447,7 +413,6 @@ out:
static scoutfs_net_request_t client_req_funcs[] = {
[SCOUTFS_NET_CMD_LOCK] = client_lock,
[SCOUTFS_NET_CMD_LOCK_RECOVER] = client_lock_recover,
[SCOUTFS_NET_CMD_OPEN_INO_MAP] = client_open_ino_map,
};
/*

View File

@@ -22,10 +22,6 @@ int scoutfs_client_srch_get_compact(struct super_block *sb,
struct scoutfs_srch_compact *sc);
int scoutfs_client_srch_commit_compact(struct super_block *sb,
struct scoutfs_srch_compact *res);
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map *map);
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
struct scoutfs_open_ino_map *map);
int scoutfs_client_setup(struct super_block *sb);
void scoutfs_client_destroy(struct super_block *sb);

View File

@@ -1135,8 +1135,7 @@ static void truncate_inode_pages_extent(struct inode *inode, u64 start, u64 len)
*/
#define MOVE_DATA_EXTENTS_PER_HOLD 16
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
u64 byte_len, struct inode *to, u64 to_off, bool is_stage,
u64 data_version)
u64 byte_len, struct inode *to, u64 to_off)
{
struct scoutfs_inode_info *from_si = SCOUTFS_I(from);
struct scoutfs_inode_info *to_si = SCOUTFS_I(to);
@@ -1146,7 +1145,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
struct data_ext_args from_args;
struct data_ext_args to_args;
struct scoutfs_extent ext;
struct timespec cur_time;
LIST_HEAD(locks);
bool done = false;
loff_t from_size;
@@ -1182,11 +1180,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
goto out;
}
if (is_stage && (data_version != SCOUTFS_I(to)->data_version)) {
ret = -ESTALE;
goto out;
}
from_iblock = from_off >> SCOUTFS_BLOCK_SM_SHIFT;
count = (byte_len + SCOUTFS_BLOCK_SM_MASK) >> SCOUTFS_BLOCK_SM_SHIFT;
to_iblock = to_off >> SCOUTFS_BLOCK_SM_SHIFT;
@@ -1209,7 +1202,7 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
/* can't stage once data_version changes */
scoutfs_inode_get_onoff(from, &junk, &from_offline);
scoutfs_inode_get_onoff(to, &junk, &to_offline);
if (from_offline || (to_offline && !is_stage)) {
if (from_offline || to_offline) {
ret = -ENODATA;
goto out;
}
@@ -1253,8 +1246,6 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
/* arbitrarily limit the number of extents per trans hold */
for (i = 0; i < MOVE_DATA_EXTENTS_PER_HOLD; i++) {
struct scoutfs_extent off_ext;
/* find the next extent to move */
ret = scoutfs_ext_next(sb, &data_ext_ops, &from_args,
from_iblock, 1, &ext);
@@ -1283,27 +1274,10 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
to_start = to_iblock + (from_start - from_iblock);
if (is_stage) {
ret = scoutfs_ext_next(sb, &data_ext_ops, &to_args,
to_start, 1, &off_ext);
if (ret)
break;
if (!scoutfs_ext_inside(to_start, len, &off_ext) ||
!(off_ext.flags & SEF_OFFLINE)) {
ret = -EINVAL;
break;
}
ret = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
} else {
/* insert the new, fails if it overlaps */
ret = scoutfs_ext_insert(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
}
/* insert the new, fails if it overlaps */
ret = scoutfs_ext_insert(sb, &data_ext_ops, &to_args,
to_start, len,
map, ext.flags);
if (ret < 0)
break;
@@ -1311,18 +1285,10 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
ret = scoutfs_ext_set(sb, &data_ext_ops, &from_args,
from_start, len, 0, 0);
if (ret < 0) {
if (is_stage) {
/* re-mark dest range as offline */
WARN_ON_ONCE(!(off_ext.flags & SEF_OFFLINE));
err = scoutfs_ext_set(sb, &data_ext_ops, &to_args,
to_start, len,
0, off_ext.flags);
} else {
/* remove inserted new on err */
err = scoutfs_ext_remove(sb, &data_ext_ops,
&to_args, to_start,
len);
}
/* remove inserted new on err */
err = scoutfs_ext_remove(sb, &data_ext_ops,
&to_args, to_start,
len);
BUG_ON(err); /* XXX inconsistent */
break;
}
@@ -1350,15 +1316,12 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
up_write(&from_si->extent_sem);
up_write(&to_si->extent_sem);
cur_time = CURRENT_TIME;
if (!is_stage) {
to->i_ctime = to->i_mtime = cur_time;
scoutfs_inode_inc_data_version(to);
scoutfs_inode_set_data_seq(to);
}
from->i_ctime = from->i_mtime = cur_time;
from->i_ctime = from->i_mtime =
to->i_ctime = to->i_mtime = CURRENT_TIME;
scoutfs_inode_inc_data_version(from);
scoutfs_inode_inc_data_version(to);
scoutfs_inode_set_data_seq(from);
scoutfs_inode_set_data_seq(to);
scoutfs_update_inode_item(from, from_lock, &locks);
scoutfs_update_inode_item(to, to_lock, &locks);

View File

@@ -59,8 +59,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
struct scoutfs_lock *lock);
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
u64 data_version);
u64 byte_len, struct inode *to, u64 to_off);
int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
u8 sef, u8 op, struct scoutfs_data_wait *ow,

View File

@@ -30,7 +30,6 @@
#include "item.h"
#include "lock.h"
#include "hash.h"
#include "omap.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -814,7 +813,6 @@ static int scoutfs_link(struct dentry *old_dentry,
struct scoutfs_lock *dir_lock;
struct scoutfs_lock *inode_lock = NULL;
LIST_HEAD(ind_locks);
bool del_orphan;
u64 dir_size;
u64 ind_seq;
u64 hash;
@@ -843,8 +841,6 @@ static int scoutfs_link(struct dentry *old_dentry,
goto out_unlock;
dir_size = i_size_read(dir) + dentry->d_name.len;
del_orphan = (inode->i_nlink == 0);
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
@@ -859,12 +855,6 @@ retry:
if (ret)
goto out;
if (del_orphan) {
ret = scoutfs_orphan_dirty(sb, scoutfs_ino(inode));
if (ret)
goto out;
}
pos = SCOUTFS_I(dir)->next_readdir_pos++;
ret = add_entry_items(sb, scoutfs_ino(dir), hash, pos,
@@ -880,11 +870,6 @@ retry:
inode->i_ctime = dir->i_mtime;
inc_nlink(inode);
if (del_orphan) {
ret = scoutfs_orphan_delete(sb, scoutfs_ino(inode));
WARN_ON_ONCE(ret);
}
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
@@ -1775,42 +1760,6 @@ static int scoutfs_dir_open(struct inode *inode, struct file *file)
}
#endif
static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
struct scoutfs_lock *dir_lock = NULL;
struct scoutfs_lock *inode_lock = NULL;
LIST_HEAD(ind_locks);
int ret;
if (dentry->d_name.len > SCOUTFS_NAME_LEN)
return -ENAMETOOLONG;
inode = lock_hold_create(dir, dentry, mode, 0,
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
insert_inode_hash(inode);
d_tmpfile(dentry, inode);
scoutfs_update_inode_item(inode, inode_lock, &ind_locks);
scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
scoutfs_inode_index_unlock(sb, &ind_locks);
ret = scoutfs_orphan_inode(inode);
WARN_ON_ONCE(ret); /* XXX returning error but items deleted */
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_WRITE);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
return ret;
}
const struct file_operations scoutfs_dir_fops = {
.KC_FOP_READDIR = scoutfs_readdir,
#ifdef KC_FMODE_KABI_ITERATE
@@ -1821,10 +1770,7 @@ const struct file_operations scoutfs_dir_fops = {
.llseek = generic_file_llseek,
};
const struct inode_operations_wrapper scoutfs_dir_iops = {
.ops = {
const struct inode_operations scoutfs_dir_iops = {
.lookup = scoutfs_lookup,
.mknod = scoutfs_mknod,
.create = scoutfs_create,
@@ -1841,8 +1787,6 @@ const struct inode_operations_wrapper scoutfs_dir_iops = {
.removexattr = scoutfs_removexattr,
.symlink = scoutfs_symlink,
.permission = scoutfs_permission,
},
.tmpfile = scoutfs_tmpfile,
};
void scoutfs_dir_exit(void)

View File

@@ -5,7 +5,7 @@
#include "lock.h"
extern const struct file_operations scoutfs_dir_fops;
extern const struct inode_operations_wrapper scoutfs_dir_iops;
extern const struct inode_operations scoutfs_dir_iops;
extern const struct inode_operations scoutfs_symlink_iops;
struct scoutfs_link_backref_entry {
@@ -14,7 +14,7 @@ struct scoutfs_link_backref_entry {
u64 dir_pos;
u16 name_len;
struct scoutfs_dirent dent;
/* the full name is allocated and stored in dent.name[] */
/* the full name is allocated and stored in dent.name[0] */
};
int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,

View File

@@ -38,7 +38,7 @@ static bool ext_overlap(struct scoutfs_extent *ext, u64 start, u64 len)
return !(e_end < start || ext->start > end);
}
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
static bool ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
{
u64 in_end = start + len - 1;
u64 out_end = out->start + out->len - 1;
@@ -241,7 +241,7 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,
goto out;
/* removed extent must be entirely within found */
if (!scoutfs_ext_inside(start, len, &found)) {
if (!ext_inside(start, len, &found)) {
ret = -EINVAL;
goto out;
}
@@ -341,7 +341,7 @@ int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
if (ret == 0 && ext_overlap(&found, start, len)) {
/* set extent must be entirely within found */
if (!scoutfs_ext_inside(start, len, &found)) {
if (!ext_inside(start, len, &found)) {
ret = -EINVAL;
goto out;
}

View File

@@ -31,6 +31,5 @@ int scoutfs_ext_alloc(struct super_block *sb, struct scoutfs_ext_ops *ops,
struct scoutfs_extent *ext);
int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
void *arg, u64 start, u64 len, u64 map, u8 flags);
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out);
#endif

View File

@@ -27,14 +27,8 @@
#include "file.h"
#include "inode.h"
#include "per_task.h"
#include "omap.h"
/*
* Start a high level file read. We check for offline extents in the
* read region here so that we only check the extents once. We use the
* dio count to prevent releasing while we're reading after we've
* checked the extents.
*/
/* TODO: Direct I/O, AIO */
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
@@ -48,32 +42,30 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
int ret;
retry:
/* protect checked extents from release */
mutex_lock(&inode->i_mutex);
atomic_inc(&inode->i_dio_count);
mutex_unlock(&inode->i_mutex);
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
if (ret)
goto out;
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
/* protect checked extents from stage/release */
mutex_lock(&inode->i_mutex);
atomic_inc(&inode->i_dio_count);
mutex_unlock(&inode->i_mutex);
ret = scoutfs_data_wait_check_iov(inode, iov, nr_segs, pos,
SEF_OFFLINE,
SCOUTFS_IOC_DWO_READ,
&dw, inode_lock);
if (ret != 0)
goto out;
} else {
WARN_ON_ONCE(true);
}
ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
inode_dio_done(inode);
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
if (scoutfs_per_task_del(&si->pt_data_lock, &pt_ent))
inode_dio_done(inode);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
if (scoutfs_data_wait_found(&dw)) {

View File

@@ -276,6 +276,7 @@ int scoutfs_forest_read_items(struct super_block *sb,
scoutfs_inc_counter(sb, forest_read_items);
calc_bloom_nrs(&bloom, &lock->start);
roots = lock->roots;
retry:
ret = scoutfs_client_get_roots(sb, &roots);
if (ret)
@@ -348,9 +349,15 @@ retry:
ret = 0;
out:
if (ret == -ESTALE) {
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
return -EIO;
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0) {
ret = -EIO;
goto out;
}
prev_refs = refs;
ret = scoutfs_client_get_roots(sb, &roots);
if (ret)
goto out;
goto retry;
}

View File

@@ -195,6 +195,9 @@ struct scoutfs_key {
#define sklt_rid _sk_first
#define sklt_nr _sk_second
/* lock clients */
#define sklc_rid _sk_first
/* seqs */
#define skts_trans_seq _sk_first
#define skts_rid _sk_second
@@ -256,7 +259,7 @@ struct scoutfs_btree_block {
__le16 mid_free_len;
__u8 level;
__u8 __pad[7];
struct scoutfs_btree_item items[];
struct scoutfs_btree_item items[0];
/* leaf blocks have a fixed size item offset hash table at the end */
};
@@ -304,7 +307,7 @@ struct scoutfs_alloc_list_block {
struct scoutfs_block_ref next;
__le32 start;
__le32 nr;
__le64 blknos[]; /* naturally aligned for sorting */
__le64 blknos[0]; /* naturally aligned for sorting */
};
#define SCOUTFS_ALLOC_LIST_MAX_BLOCKS \
@@ -359,7 +362,7 @@ struct scoutfs_srch_file {
struct scoutfs_srch_parent {
struct scoutfs_block_header hdr;
struct scoutfs_block_ref refs[];
struct scoutfs_block_ref refs[0];
};
#define SCOUTFS_SRCH_PARENT_REFS \
@@ -374,7 +377,7 @@ struct scoutfs_srch_block {
struct scoutfs_srch_entry tail;
__le32 entry_nr;
__le32 entry_bytes;
__u8 entries[];
__u8 entries[0];
};
/*
@@ -449,7 +452,7 @@ struct scoutfs_log_item_value {
__le64 vers;
__u8 flags;
__u8 __pad[7];
__u8 data[];
__u8 data[0];
};
/*
@@ -464,7 +467,7 @@ struct scoutfs_log_item_value {
struct scoutfs_bloom_block {
struct scoutfs_block_header hdr;
__le64 total_set;
__le64 bits[];
__le64 bits[0];
};
/*
@@ -490,10 +493,11 @@ struct scoutfs_bloom_block {
#define SCOUTFS_LOCK_ZONE 4
/* Items only stored in server btrees */
#define SCOUTFS_LOG_TREES_ZONE 6
#define SCOUTFS_TRANS_SEQ_ZONE 7
#define SCOUTFS_MOUNTED_CLIENT_ZONE 8
#define SCOUTFS_SRCH_ZONE 9
#define SCOUTFS_FREE_EXTENT_ZONE 10
#define SCOUTFS_LOCK_CLIENTS_ZONE 7
#define SCOUTFS_TRANS_SEQ_ZONE 8
#define SCOUTFS_MOUNTED_CLIENT_ZONE 9
#define SCOUTFS_SRCH_ZONE 10
#define SCOUTFS_FREE_EXTENT_ZONE 11
/* inode index zone */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
@@ -545,7 +549,7 @@ struct scoutfs_xattr {
__le16 val_len;
__u8 name_len;
__u8 __pad[5];
__u8 name[];
__u8 name[0];
};
@@ -649,6 +653,7 @@ struct scoutfs_super_block {
struct scoutfs_alloc_list_head server_meta_freed[2];
struct scoutfs_btree_root fs_root;
struct scoutfs_btree_root logs_root;
struct scoutfs_btree_root lock_clients;
struct scoutfs_btree_root trans_seqs;
struct scoutfs_btree_root mounted_clients;
struct scoutfs_btree_root srch_root;
@@ -724,7 +729,7 @@ struct scoutfs_dirent {
__le64 pos;
__u8 type;
__u8 __pad[7];
__u8 name[];
__u8 name[0];
};
#define SCOUTFS_NAME_LEN 255
@@ -822,7 +827,7 @@ struct scoutfs_net_header {
__u8 flags;
__u8 error;
__u8 __pad[3];
__u8 data[];
__u8 data[0];
};
#define SCOUTFS_NET_FLAG_RESPONSE (1 << 0)
@@ -840,7 +845,6 @@ enum scoutfs_net_cmd {
SCOUTFS_NET_CMD_LOCK_RECOVER,
SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
SCOUTFS_NET_CMD_OPEN_INO_MAP,
SCOUTFS_NET_CMD_FAREWELL,
SCOUTFS_NET_CMD_UNKNOWN,
};
@@ -891,10 +895,15 @@ struct scoutfs_net_lock {
__u8 __pad[6];
};
struct scoutfs_net_lock_grant_response {
struct scoutfs_net_lock nl;
struct scoutfs_net_roots roots;
};
struct scoutfs_net_lock_recover {
__le16 nr;
__u8 __pad[6];
struct scoutfs_net_lock locks[];
struct scoutfs_net_lock locks[0];
};
#define SCOUTFS_NET_LOCK_MAX_RECOVER_NR \
@@ -961,42 +970,4 @@ enum scoutfs_corruption_sources {
#define SC_NR_LONGS DIV_ROUND_UP(SC_NR_SOURCES, BITS_PER_LONG)
#define SCOUTFS_OPEN_INO_MAP_SHIFT 10
#define SCOUTFS_OPEN_INO_MAP_BITS (1 << SCOUTFS_OPEN_INO_MAP_SHIFT)
#define SCOUTFS_OPEN_INO_MAP_MASK (SCOUTFS_OPEN_INO_MAP_BITS - 1)
#define SCOUTFS_OPEN_INO_MAP_LE64S (SCOUTFS_OPEN_INO_MAP_BITS / 64)
/*
* The request and response conversation is as follows:
*
* client[init] -> server:
* group_nr = G
* req_id = 0 (I)
* server -> client[*]
* group_nr = G
* req_id = R
* client[*] -> server
* group_nr = G (I)
* req_id = R
* bits
* server -> client[init]
* group_nr = G (I)
* req_id = R (I)
* bits
*
* Many of the fields in individual messages are ignored ("I") because
* the net id or the omap req_id can be used to identify the
* conversation. We always include them on the wire to make inspected
* messages easier to follow.
*/
struct scoutfs_open_ino_map_args {
__le64 group_nr;
__le64 req_id;
};
struct scoutfs_open_ino_map {
struct scoutfs_open_ino_map_args args;
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
};
#endif

View File

@@ -33,7 +33,6 @@
#include "item.h"
#include "client.h"
#include "cmp.h"
#include "omap.h"
/*
* XXX
@@ -83,8 +82,6 @@ static void scoutfs_inode_ctor(void *obj)
init_waitqueue_head(&si->data_waitq.waitq);
init_rwsem(&si->xattr_rwsem);
RB_CLEAR_NODE(&si->writeback_node);
scoutfs_lock_init_coverage(&si->ino_lock_cov);
atomic_set(&si->inv_iput_count, 0);
inode_init_once(&si->inode);
}
@@ -144,15 +141,12 @@ static void remove_writeback_inode(struct inode_sb_info *inf,
void scoutfs_destroy_inode(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
DECLARE_INODE_SB_INFO(inode->i_sb, inf);
spin_lock(&inf->writeback_lock);
remove_writeback_inode(inf, SCOUTFS_I(inode));
spin_unlock(&inf->writeback_lock);
scoutfs_lock_del_coverage(inode->i_sb, &si->ino_lock_cov);
call_rcu(&inode->i_rcu, scoutfs_i_callback);
}
@@ -188,8 +182,7 @@ static void set_inode_ops(struct inode *inode)
inode->i_fop = &scoutfs_file_fops;
break;
case S_IFDIR:
inode->i_op = &scoutfs_dir_iops.ops;
inode->i_flags |= S_IOPS_WRAPPER;
inode->i_op = &scoutfs_dir_iops;
inode->i_fop = &scoutfs_dir_fops;
break;
case S_IFLNK:
@@ -313,8 +306,6 @@ int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
if (ret == 0) {
load_inode(inode, &sinode);
atomic64_set(&si->last_refreshed, refresh_gen);
scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
si->drop_invalidated = false;
}
} else {
ret = 0;
@@ -672,28 +663,6 @@ struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino)
return ilookup5(sb, ino, scoutfs_iget_test, &ino);
}
static int iget_test_nofreeing(struct inode *inode, void *arg)
{
return !(inode->i_state & I_FREEING) && scoutfs_iget_test(inode, arg);
}
/*
* There's a natural risk of a deadlock between lock invalidation and
* eviction. Invalidation blocks locks while looking up inodes and
* invalidating local caches. Inode eviction gets a lock to check final
* inode deletion while the inode is marked FREEING which blocks
* lookups.
*
* We have a lookup variant which doesn't return I_FREEING inodes
* instead of waiting on them. If an inode has made it to I_FREEING
* then it doesn't have any local caches that are reachable and the lock
* invalidation promise is kept.
*/
struct inode *scoutfs_ilookup_nofreeing(struct super_block *sb, u64 ino)
{
return ilookup5(sb, ino, iget_test_nofreeing, &ino);
}
struct inode *scoutfs_iget(struct super_block *sb, u64 ino)
{
struct scoutfs_lock *lock = NULL;
@@ -718,8 +687,6 @@ struct inode *scoutfs_iget(struct super_block *sb, u64 ino)
atomic64_set(&si->last_refreshed, 0);
ret = scoutfs_inode_refresh(inode, lock, 0);
if (ret == 0)
ret = scoutfs_omap_inc(sb, ino);
if (ret) {
iget_failed(inode);
inode = ERR_PTR(ret);
@@ -1416,8 +1383,6 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
si->next_xattr_id = 0;
si->have_item = false;
atomic64_set(&si->last_refreshed, lock->refresh_gen);
scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
si->drop_invalidated = false;
si->flags = 0;
scoutfs_inode_set_meta_seq(inode);
@@ -1433,17 +1398,10 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
store_inode(&sinode, inode);
init_inode_key(&key, scoutfs_ino(inode));
ret = scoutfs_omap_inc(sb, ino);
if (ret < 0)
goto out;
ret = scoutfs_item_create(sb, &key, &sinode, sizeof(sinode), lock);
if (ret < 0)
scoutfs_omap_dec(sb, ino);
out:
if (ret) {
iput(inode);
inode = ERR_PTR(ret);
return ERR_PTR(ret);
}
return inode;
@@ -1459,18 +1417,7 @@ static void init_orphan_key(struct scoutfs_key *key, u64 rid, u64 ino)
};
}
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
struct scoutfs_key key;
init_orphan_key(&key, sbi->rid, ino);
return scoutfs_item_dirty(sb, &key, lock);
}
int scoutfs_orphan_delete(struct super_block *sb, u64 ino)
static int remove_orphan_item(struct super_block *sb, u64 ino)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
@@ -1488,15 +1435,15 @@ int scoutfs_orphan_delete(struct super_block *sb, u64 ino)
/*
* Remove all the items associated with a given inode. This is only
* called once nlink has dropped to zero and nothing has the inode open
* so we don't have to worry about dirents referencing the inode or link
* backrefs. Dropping nlink to 0 also created an orphan item. That
* orphan item will continue triggering attempts to finish previous
* partial deletion until all deletion is complete and the orphan item
* is removed.
* called once nlink has dropped to zero so we don't have to worry about
* dirents referencing the inode or link backrefs. Dropping nlink to 0
* also created an orphan item. That orphan item will continue
* triggering attempts to finish previous partial deletion until all
* deletion is complete and the orphan item is removed.
*/
static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lock *lock)
static int delete_inode_items(struct super_block *sb, u64 ino)
{
struct scoutfs_lock *lock = NULL;
struct scoutfs_inode sinode;
struct scoutfs_key key;
LIST_HEAD(ind_locks);
@@ -1506,6 +1453,10 @@ static int delete_inode_items(struct super_block *sb, u64 ino, struct scoutfs_lo
u64 size;
int ret;
ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &lock);
if (ret)
return ret;
init_inode_key(&key, ino);
ret = scoutfs_item_lookup_exact(sb, &key, &sinode, sizeof(sinode),
@@ -1565,29 +1516,23 @@ retry:
if (ret)
goto out;
ret = scoutfs_orphan_delete(sb, ino);
ret = remove_orphan_item(sb, ino);
out:
if (release)
scoutfs_release_trans(sb);
scoutfs_inode_index_unlock(sb, &ind_locks);
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
return ret;
}
/*
* iput_final has already written out the dirty pages to the inode
* before we get here. We're left with a clean inode that we have to
* tear down. We use locking and open inode number bitmaps to decide if
* we should finally destroy an inode that is no longer open nor
* reachable through directory entries.
* tear down. If there are no more links to the inode then we also
* remove all its persistent structures.
*/
void scoutfs_evict_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const u64 ino = scoutfs_ino(inode);
struct scoutfs_lock *lock;
int ret;
trace_scoutfs_evict_inode(inode->i_sb, scoutfs_ino(inode),
inode->i_nlink, is_bad_inode(inode));
@@ -1596,45 +1541,19 @@ void scoutfs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
ret = scoutfs_omap_should_delete(sb, inode, &lock);
if (ret > 0) {
ret = delete_inode_items(inode->i_sb, scoutfs_ino(inode), lock);
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
}
if (ret < 0)
scoutfs_err(sb, "error %d while checking to delete inode nr %llu, it might linger.",
ret, ino);
scoutfs_omap_dec(sb, ino);
if (inode->i_nlink == 0)
delete_inode_items(inode->i_sb, scoutfs_ino(inode));
clear:
clear_inode(inode);
}
/*
* We want to remove inodes from the cache as their count goes to 0 if
* they're no longer covered by a cluster lock or if while locked they
* were unlinked.
*
* We don't want unused cached inodes to linger outside of cluster
* locking so that they don't prevent final inode deletion on other
* nodes. We don't have specific per-inode or per-dentry locks which
* would otherwise remove the stale caches as they're invalidated.
* Stale cached inodes provide little value because they're going to be
* refreshed the next time they're locked. Populating the item cache
* and loading the inode item is a lot more expensive than initializing
* and inserting a newly allocated vfs inode.
*/
int scoutfs_drop_inode(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
struct super_block *sb = inode->i_sb;
int ret = generic_drop_inode(inode);
trace_scoutfs_drop_inode(sb, scoutfs_ino(inode), inode->i_nlink, inode_unhashed(inode),
si->drop_invalidated);
return si->drop_invalidated || !scoutfs_lock_is_covered(sb, &si->ino_lock_cov) ||
generic_drop_inode(inode);
trace_scoutfs_drop_inode(inode->i_sb, scoutfs_ino(inode),
inode->i_nlink, inode_unhashed(inode));
return ret;
}
/*
@@ -1651,10 +1570,8 @@ int scoutfs_scan_orphans(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_lock *lock = sbi->rid_lock;
struct scoutfs_lock *inode_lock = NULL;
struct scoutfs_key key;
struct scoutfs_key last;
u64 ino;
int err = 0;
int ret;
@@ -1670,13 +1587,7 @@ int scoutfs_scan_orphans(struct super_block *sb)
if (ret < 0)
goto out;
ino = le64_to_cpu(key.sko_ino);
ret = scoutfs_lock_ino(sb, SCOUTFS_LOCK_WRITE, 0, ino, &inode_lock);
if (ret == 0) {
ret = delete_inode_items(sb, le64_to_cpu(key.sko_ino), inode_lock);
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
}
ret = delete_inode_items(sb, le64_to_cpu(key.sko_ino));
if (ret && ret != -ENOENT && !err)
err = ret;

View File

@@ -51,13 +51,6 @@ struct scoutfs_inode_info {
struct rw_semaphore xattr_rwsem;
struct rb_node writeback_node;
struct scoutfs_lock_coverage ino_lock_cov;
/* drop if i_count hits 0, allows drop while invalidate holds coverage */
bool drop_invalidated;
struct llist_node inv_iput_llnode;
atomic_t inv_iput_count;
struct inode inode;
};
@@ -79,7 +72,6 @@ int scoutfs_orphan_inode(struct inode *inode);
struct inode *scoutfs_iget(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup_nofreeing(struct super_block *sb, u64 ino);
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
u32 minor, u64 ino);
@@ -122,8 +114,6 @@ int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
int scoutfs_scan_orphans(struct super_block *sb);
int scoutfs_orphan_dirty(struct super_block *sb, u64 ino);
int scoutfs_orphan_delete(struct super_block *sb, u64 ino);
void scoutfs_inode_queue_writeback(struct inode *inode);
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);

View File

@@ -972,18 +972,12 @@ static long scoutfs_ioc_move_blocks(struct file *file, unsigned long arg)
goto out;
}
if (mb.flags & SCOUTFS_IOC_MB_UNKNOWN) {
ret = -EINVAL;
goto out;
}
ret = mnt_want_write_file(file);
if (ret < 0)
goto out;
ret = scoutfs_data_move_blocks(from, mb.from_off, mb.len,
to, mb.to_off, !!(mb.flags & SCOUTFS_IOC_MB_STAGE),
mb.data_version);
to, mb.to_off);
mnt_drop_write_file(file);
out:
fput(from_file);

View File

@@ -163,7 +163,7 @@ struct scoutfs_ioctl_ino_path_result {
__u64 dir_pos;
__u16 path_bytes;
__u8 _pad[6];
__u8 path[];
__u8 path[0];
};
/* Get a single path from the root to the given inode number */
@@ -259,7 +259,7 @@ struct scoutfs_ioctl_data_waiting {
__u8 _pad[6];
};
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U8_MAX << 0)
#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
struct scoutfs_ioctl_data_waiting)
@@ -279,7 +279,7 @@ struct scoutfs_ioctl_setattr_more {
};
#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U64_MAX << 1)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U8_MAX << 1)
#define SCOUTFS_IOC_SETATTR_MORE _IOW(SCOUTFS_IOCTL_MAGIC, 7, \
struct scoutfs_ioctl_setattr_more)
@@ -418,13 +418,12 @@ struct scoutfs_ioctl_alloc_detail_entry {
* on the same file system.
*
* from_fd specifies the source file and the ioctl is called on the
* destination file. Both files must have write access. from_off specifies
* the byte offset in the source, to_off is the byte offset in the
* destination, and len is the number of bytes in the region to move. All of
* the offsets and lengths must be in multiples of 4KB, except in the case
* where the from_off + len ends at the i_size of the source
* file. data_version is only used when STAGE flag is set (see below). flags
* field is currently only used to optionally specify STAGE behavior.
* destination file. Both files must have write access. from_off
* specifies the byte offset in the source, to_off is the byte offset in
* the destination, and len is the number of bytes in the region to
* move. All of the offsets and lengths must be in multiples of 4KB,
* except in the case where the from_off + len ends at the i_size of the
* source file.
*
* This interface only moves extents which are block granular, it does
* not perform RMW of sub-block byte extents and it does not overwrite
@@ -436,41 +435,30 @@ struct scoutfs_ioctl_alloc_detail_entry {
* i_size. The i_size update will maintain final partial blocks in the
* source.
*
* If STAGE flag is not set, it will return an error if either of the files
* have offline extents. It will return 0 when all of the extents in the
* source region have been moved to the destination. Moving extents updates
* the ctime, mtime, meta_seq, data_seq, and data_version fields of both the
* source and destination inodes. If an error is returned then partial
* It will return an error if either of the files have offline extents.
* It will return 0 when all of the extents in the source region have
* been moved to the destination. Moving extents updates the ctime,
* mtime, meta_seq, data_seq, and data_version fields of both the source
* and destination inodes. If an error is returned then partial
* progress may have been made and inode fields may have been updated.
*
* If STAGE flag is set, as above except destination range must be in an
* offline extent. Fields are updated only for source inode.
*
* Errors specific to this interface include:
*
* EINVAL: from_off, len, or to_off aren't a multiple of 4KB; the source
* and destination files are the same inode; either the source or
* destination is not a regular file; the destination file has
* an existing overlapping extent (if STAGE flag not set); the
* destination range is not in an offline extent (if STAGE set).
* an existing overlapping extent.
* EOVERFLOW: either from_off + len or to_off + len exceeded 64bits.
* EBADF: from_fd isn't a valid open file descriptor.
* EXDEV: the source and destination files are in different filesystems.
* EISDIR: either the source or destination is a directory.
* ENODATA: either the source or destination file have offline extents and
* STAGE flag is not set.
* ESTALE: data_version does not match destination data_version.
* ENODATA: either the source or destination file have offline extents.
*/
#define SCOUTFS_IOC_MB_STAGE (1 << 0)
#define SCOUTFS_IOC_MB_UNKNOWN (U64_MAX << 1)
struct scoutfs_ioctl_move_blocks {
__u64 from_fd;
__u64 from_off;
__u64 len;
__u64 to_off;
__u64 data_version;
__u64 flags;
};
#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \
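
To make the interface concrete, here is a hypothetical user-space
caller sketched against the reduced structure above (the helper name,
header path, and error handling are illustrative; it assumes the
post-change struct with only from_fd, from_off, len, and to_off):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include "scoutfs_ioctl.h"	/* illustrative path to the definitions above */

	/* move all of src's extents into dst; returns 0 or -1 with errno set */
	int move_file_blocks(const char *src, const char *dst, __u64 len)
	{
		struct scoutfs_ioctl_move_blocks mb = { 0 };
		int from_fd = open(src, O_RDWR);	/* both files need write access */
		int to_fd = open(dst, O_RDWR);		/* ioctl is called on the destination */
		int ret = -1;

		if (from_fd >= 0 && to_fd >= 0) {
			mb.from_fd = from_fd;
			mb.from_off = 0;
			mb.len = len;	/* 4KB multiple unless from_off + len ends at src i_size */
			mb.to_off = 0;
			ret = ioctl(to_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
		}

		if (from_fd >= 0)
			close(from_fd);
		if (to_fd >= 0)
			close(to_fd);
		return ret;
	}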

View File

@@ -34,7 +34,6 @@
#include "data.h"
#include "xattr.h"
#include "item.h"
#include "omap.h"
/*
* scoutfs uses a lock service to manage item cache consistency between
@@ -75,7 +74,6 @@ struct lock_info {
struct super_block *sb;
spinlock_t lock;
bool shutdown;
bool unmounting;
struct rb_root lock_tree;
struct rb_root lock_range_tree;
struct shrinker shrinker;
@@ -89,9 +87,6 @@ struct lock_info {
struct work_struct shrink_work;
struct list_head shrink_list;
atomic64_t next_refresh_gen;
struct work_struct inv_iput_work;
struct llist_head inv_iput_llist;
struct dentry *tseq_dentry;
struct scoutfs_tseq_tree tseq_tree;
};
@@ -127,81 +122,21 @@ static bool lock_modes_match(int granted, int requested)
}
/*
* Final iput can get into evict and perform final inode deletion which
* can delete a lot of items under locks and transactions. We really
* don't want to be doing all that in an iput during invalidation. When
* invalidation sees that iput might perform final deletion it puts them
* on a list and queues this work.
*
* Nothing stops multiple puts for multiple invalidations of an inode
* before the work runs so we can track multiple puts in flight.
*/
static void lock_inv_iput_worker(struct work_struct *work)
{
struct lock_info *linfo = container_of(work, struct lock_info, inv_iput_work);
struct scoutfs_inode_info *si;
struct scoutfs_inode_info *tmp;
struct llist_node *inodes;
bool more;
inodes = llist_del_all(&linfo->inv_iput_llist);
llist_for_each_entry_safe(si, tmp, inodes, inv_iput_llnode) {
do {
more = atomic_dec_return(&si->inv_iput_count) > 0;
iput(&si->inode);
} while (more);
}
}
/*
* Invalidate cached data associated with an inode whose lock is going
* away. We ignore inodes with I_FREEING instead of waiting on them to
* avoid a deadlock; if they're freeing then they won't be visible to
* future lock users and we don't need to invalidate them.
*
* We try to drop cached dentries and inodes covered by the lock if they
* aren't referenced. This removes them from the mount's open map and
* allows deletions to be performed by unlink without having to wait for
* remote cached inodes to be dropped.
*
* If the cached inode was already deferring final inode deletion then
* we can't perform that inline in invalidation. The locking alone can
* deadlock, and it might also take multiple transactions to fully
* delete an inode with significant metadata. We only perform the iput
* inline if we know that possible eviction can't perform the final
* deletion, otherwise we kick it off to async work.
* Invalidate cached data associated with an inode whose lock is going
* away.
*/
static void invalidate_inode(struct super_block *sb, u64 ino)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_inode_info *si;
struct inode *inode;
inode = scoutfs_ilookup_nofreeing(sb, ino);
inode = scoutfs_ilookup(sb, ino);
if (inode) {
si = SCOUTFS_I(inode);
scoutfs_inc_counter(sb, lock_invalidate_inode);
if (S_ISREG(inode->i_mode)) {
truncate_inode_pages(inode->i_mapping, 0);
scoutfs_data_wait_changed(inode);
}
/* can't touch during unmount, dcache destroys w/o locks */
if (!linfo->unmounting)
d_prune_aliases(inode);
si->drop_invalidated = true;
if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
iput(inode);
} else {
/* defer iput to work context so we don't evict inodes from invalidation */
if (atomic_inc_return(&si->inv_iput_count) == 1)
llist_add(&si->inv_iput_llnode, &linfo->inv_iput_llist);
smp_wmb(); /* count and list visible before work executes */
queue_work(linfo->workq, &linfo->inv_iput_work);
}
iput(inode);
}
}
@@ -237,16 +172,6 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
/* have to invalidate if we're not in the only usable case */
if (!(prev == SCOUTFS_LOCK_WRITE && mode == SCOUTFS_LOCK_READ)) {
retry:
/* invalidate inodes before removing coverage */
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
ino = le64_to_cpu(lock->start.ski_ino);
last = le64_to_cpu(lock->end.ski_ino);
while (ino <= last) {
invalidate_inode(sb, ino);
ino++;
}
}
/* remove cov items to tell users that their cache is stale */
spin_lock(&lock->cov_list_lock);
list_for_each_entry_safe(cov, tmp, &lock->cov_list, head) {
@@ -262,6 +187,15 @@ retry:
}
spin_unlock(&lock->cov_list_lock);
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
ino = le64_to_cpu(lock->start.ski_ino);
last = le64_to_cpu(lock->end.ski_ino);
while (ino <= last) {
invalidate_inode(sb, ino);
ino++;
}
}
scoutfs_item_invalidate(sb, &lock->start, &lock->end);
}
@@ -295,7 +229,6 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
BUG_ON(!list_empty(&lock->shrink_head));
BUG_ON(!list_empty(&lock->cov_list));
scoutfs_omap_free_lock_data(lock->omap_data);
kfree(lock);
}
@@ -331,7 +264,6 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
lock->mode = SCOUTFS_LOCK_NULL;
atomic64_set(&lock->forest_bloom_nr, 0);
spin_lock_init(&lock->omap_spinlock);
trace_scoutfs_lock_alloc(sb, lock);
@@ -621,7 +553,7 @@ static void queue_grant_work(struct lock_info *linfo)
{
assert_spin_locked(&linfo->lock);
if (!list_empty(&linfo->grant_list))
if (!list_empty(&linfo->grant_list) && !linfo->shutdown)
queue_work(linfo->workq, &linfo->grant_work);
}
@@ -637,7 +569,7 @@ static void queue_inv_work(struct lock_info *linfo)
{
assert_spin_locked(&linfo->lock);
if (!list_empty(&linfo->inv_list))
if (!list_empty(&linfo->inv_list) && !linfo->shutdown)
mod_delayed_work(linfo->workq, &linfo->inv_dwork, 0);
}
@@ -706,6 +638,7 @@ static void lock_grant_worker(struct work_struct *work)
struct lock_info *linfo = container_of(work, struct lock_info,
grant_work);
struct super_block *sb = linfo->sb;
struct scoutfs_net_lock_grant_response *gr;
struct scoutfs_net_lock *nl;
struct scoutfs_lock *lock;
struct scoutfs_lock *tmp;
@@ -715,7 +648,8 @@ static void lock_grant_worker(struct work_struct *work)
spin_lock(&linfo->lock);
list_for_each_entry_safe(lock, tmp, &linfo->grant_list, grant_head) {
nl = &lock->grant_nl;
gr = &lock->grant_resp;
nl = &lock->grant_resp.nl;
/* wait for reordered invalidation to finish */
if (lock->mode != nl->old_mode)
@@ -733,6 +667,7 @@ static void lock_grant_worker(struct work_struct *work)
lock->request_pending = 0;
lock->mode = nl->new_mode;
lock->write_version = le64_to_cpu(nl->write_version);
lock->roots = gr->roots;
if (lock_count_match_exists(nl->new_mode, lock->waiters))
extend_grace(sb, lock);
@@ -754,8 +689,9 @@ static void lock_grant_worker(struct work_struct *work)
* work to process.
*/
int scoutfs_lock_grant_response(struct super_block *sb,
struct scoutfs_net_lock *nl)
struct scoutfs_net_lock_grant_response *gr)
{
struct scoutfs_net_lock *nl = &gr->nl;
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
@@ -769,7 +705,7 @@ int scoutfs_lock_grant_response(struct super_block *sb,
trace_scoutfs_lock_grant_response(sb, lock);
BUG_ON(!lock->request_pending);
lock->grant_nl = *nl;
lock->grant_resp = *gr;
list_add_tail(&lock->grant_head, &linfo->grant_list);
queue_grant_work(linfo);
@@ -781,9 +717,7 @@ int scoutfs_lock_grant_response(struct super_block *sb,
/*
* Each lock has received a lock invalidation request from the server
* which specifies a new mode for the lock. The server will only send
* one invalidation request at a time for each lock. The server can
* send another invalidate request after we send the response but before
* we reacquire the lock and finish invalidation.
* one invalidation request at a time for each lock.
*
* This is an unsolicited request from the server so it can arrive at
* any time after we make the server aware of the lock by initially
@@ -870,14 +804,8 @@ static void lock_invalidate_worker(struct work_struct *work)
nl = &lock->inv_nl;
net_id = lock->inv_net_id;
/* only lock protocol, inv can't call subsystems after shutdown */
if (!linfo->shutdown) {
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
BUG_ON(ret);
}
/* allow another request after we respond but before we finish */
lock->inv_net_id = 0;
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
BUG_ON(ret);
/* respond with the key and modes from the request */
ret = scoutfs_client_lock_response(sb, net_id, nl);
@@ -890,13 +818,11 @@ static void lock_invalidate_worker(struct work_struct *work)
spin_lock(&linfo->lock);
list_for_each_entry_safe(lock, tmp, &ready, inv_head) {
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
trace_scoutfs_lock_invalidated(sb, lock);
if (lock->inv_net_id == 0) {
/* finish if another request didn't arrive */
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
wake_up(&lock->waitq);
}
wake_up(&lock->waitq);
put_lock(linfo, lock);
}
@@ -911,47 +837,34 @@ out:
}
/*
* Record an incoming invalidate request from the server and add its
* lock to the list for processing. This request can be from a new
* server and race with invalidation that frees locks from an old server.
* It's fine to not find the requested lock and send an immediate
* response.
* Record an incoming invalidate request from the server and add its lock
* to the list for processing.
*
* The invalidation process drops the linfo lock to send responses. The
* moment it does so we can receive another invalidation request (the
* server can ask us to go from write->read then read->null). We allow
* for one chain like this but it's a bug if we receive more concurrent
* invalidation requests than that. The server should be only sending
* one at a time.
* This is trusting the server and will crash if it's sent bad requests :/
*/
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl)
{
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
int ret = 0;
scoutfs_inc_counter(sb, lock_invalidate_request);
spin_lock(&linfo->lock);
lock = get_lock(sb, &nl->key);
BUG_ON(!lock);
if (lock) {
BUG_ON(lock->inv_net_id != 0);
lock->inv_net_id = net_id;
BUG_ON(lock->invalidate_pending);
lock->invalidate_pending = 1;
lock->inv_nl = *nl;
if (list_empty(&lock->inv_head)) {
list_add_tail(&lock->inv_head, &linfo->inv_list);
lock->invalidate_pending = 1;
}
lock->inv_net_id = net_id;
list_add_tail(&lock->inv_head, &linfo->inv_list);
trace_scoutfs_lock_invalidate_request(sb, lock);
queue_inv_work(linfo);
}
spin_unlock(&linfo->lock);
if (!lock)
ret = scoutfs_client_lock_response(sb, net_id, nl);
return ret;
return 0;
}
/*
@@ -1082,7 +995,7 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
lock_inc_count(lock->waiters, mode);
for (;;) {
if (WARN_ON_ONCE(linfo->shutdown)) {
if (linfo->shutdown) {
ret = -ESHUTDOWN;
break;
}
@@ -1564,7 +1477,7 @@ restart:
BUG_ON(lock->mode == SCOUTFS_LOCK_NULL);
BUG_ON(!list_empty(&lock->shrink_head));
if (nr-- == 0)
if (linfo->shutdown || nr-- == 0)
break;
__lock_del_lru(linfo, lock);
@@ -1591,7 +1504,7 @@ out:
return ret;
}
void scoutfs_free_unused_locks(struct super_block *sb)
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr)
{
struct lock_info *linfo = SCOUTFS_SB(sb)->lock_info;
struct shrink_control sc = {
@@ -1619,40 +1532,15 @@ static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
}
/*
* shrink_dcache_for_umount() tears down dentries with no locking. We
* need to make sure that our invalidation won't touch dentries before
* we return and the caller calls the generic vfs unmount path.
*/
void scoutfs_lock_unmount_begin(struct super_block *sb)
{
DECLARE_LOCK_INFO(sb, linfo);
if (linfo) {
linfo->unmounting = true;
flush_delayed_work(&linfo->inv_dwork);
}
}
/*
* The caller is going to be shutting down transactions and the client.
* We need to make sure that locking won't call either after we return.
* The caller is going to be calling _destroy soon and, critically, is
* about to shut down networking before calling us so that we don't get
* any callbacks while we're destroying. We have to ensure that we
* won't call networking after this returns.
*
* At this point all fs callers and internal services that use locks
* should have stopped. We won't have any callers initiating lock
* transitions and sending requests. We set the shutdown flag to catch
* anyone who breaks this rule.
*
* We unregister the shrinker so that we won't try and send null
* requests in response to memory pressure. The locks will all be
* unceremoniously dropped once we get a farewell response from the
* server which indicates that they destroyed our locking state.
*
* We will still respond to invalidation requests that have to be
* processed to let unmount in other mounts acquire locks and make
* progress. However, we don't fully process the invalidation because
* we're shutting down. We only update the lock state and send the
* response. We shouldn't have any users of locking that require
* invalidation correctness at this point.
* Internal fs threads can be using locking, and locking can have async
* work pending. We use ->shutdown to force callers to return
* -ESHUTDOWN and to prevent the future queueing of work that could call
* networking. Locks whose work is stopped will be torn down by _destroy.
*/
void scoutfs_lock_shutdown(struct super_block *sb)
{
@@ -1665,18 +1553,19 @@ void scoutfs_lock_shutdown(struct super_block *sb)
trace_scoutfs_lock_shutdown(sb, linfo);
/* stop the shrinker from queueing work */
unregister_shrinker(&linfo->shrinker);
flush_work(&linfo->shrink_work);
/* cause current and future lock calls to return errors */
spin_lock(&linfo->lock);
linfo->shutdown = true;
for (node = rb_first(&linfo->lock_tree); node; node = rb_next(node)) {
lock = rb_entry(node, struct scoutfs_lock, node);
wake_up(&lock->waitq);
}
spin_unlock(&linfo->lock);
flush_work(&linfo->grant_work);
flush_delayed_work(&linfo->inv_dwork);
flush_work(&linfo->shrink_work);
}
/*
@@ -1704,6 +1593,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
trace_scoutfs_lock_destroy(sb, linfo);
/* stop the shrinker from queueing work */
unregister_shrinker(&linfo->shrinker);
/* make sure that no one's actively using locks */
spin_lock(&linfo->lock);
@@ -1749,10 +1640,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
__lock_del_lru(linfo, lock);
if (!list_empty(&lock->grant_head))
list_del_init(&lock->grant_head);
if (!list_empty(&lock->inv_head)) {
if (!list_empty(&lock->inv_head))
list_del_init(&lock->inv_head);
lock->invalidate_pending = 0;
}
if (!list_empty(&lock->shrink_head))
list_del_init(&lock->shrink_head);
lock_remove(linfo, lock);
@@ -1789,8 +1678,6 @@ int scoutfs_lock_setup(struct super_block *sb)
INIT_WORK(&linfo->shrink_work, lock_shrink_worker);
INIT_LIST_HEAD(&linfo->shrink_list);
atomic64_set(&linfo->next_refresh_gen, 0);
INIT_WORK(&linfo->inv_iput_work, lock_inv_iput_worker);
init_llist_head(&linfo->inv_iput_llist);
scoutfs_tseq_tree_init(&linfo->tseq_tree, lock_tseq_show);
sbi->lock_info = linfo;

View File

@@ -10,8 +10,6 @@
#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID
struct scoutfs_omap_lock;
/*
* A few fields (start, end, refresh_gen, write_version, granted_mode)
* are referenced by code outside lock.c.
@@ -25,6 +23,7 @@ struct scoutfs_lock {
u64 refresh_gen;
u64 write_version;
u64 dirty_trans_seq;
struct scoutfs_net_roots roots;
struct list_head lru_head;
wait_queue_head_t waitq;
ktime_t grace_deadline;
@@ -32,7 +31,7 @@ struct scoutfs_lock {
invalidate_pending:1;
struct list_head grant_head;
struct scoutfs_net_lock grant_nl;
struct scoutfs_net_lock_grant_response grant_resp;
struct list_head inv_head;
struct scoutfs_net_lock inv_nl;
u64 inv_net_id;
@@ -49,10 +48,6 @@ struct scoutfs_lock {
/* the forest tracks which log tree last saw bloom bit updates */
atomic64_t forest_bloom_nr;
/* open ino mapping has a valid map for a held write lock */
spinlock_t omap_spinlock;
struct scoutfs_omap_lock_data *omap_data;
};
struct scoutfs_lock_coverage {
@@ -62,7 +57,7 @@ struct scoutfs_lock_coverage {
};
int scoutfs_lock_grant_response(struct super_block *sb,
struct scoutfs_net_lock *nl);
struct scoutfs_net_lock_grant_response *gr);
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl);
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
@@ -101,10 +96,9 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
enum scoutfs_lock_mode mode);
void scoutfs_free_unused_locks(struct super_block *sb);
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr);
int scoutfs_lock_setup(struct super_block *sb);
void scoutfs_lock_unmount_begin(struct super_block *sb);
void scoutfs_lock_shutdown(struct super_block *sb);
void scoutfs_lock_destroy(struct super_block *sb);

View File

@@ -20,10 +20,10 @@
#include "tseq.h"
#include "spbm.h"
#include "block.h"
#include "btree.h"
#include "msg.h"
#include "scoutfs_trace.h"
#include "lock_server.h"
#include "recov.h"
/*
* The scoutfs server implements a simple lock service. Client mounts
@@ -56,11 +56,14 @@
* Message requests and responses are reliably delivered in order across
* reconnection.
*
* As a new server comes up it recovers lock state from existing clients
* which were connected to a previous lock server. Recover requests are
* sent to clients as they connect and they respond with all their
* locks. Once all clients and locks are accounted for, normal
* processing can resume.
* The server maintains a persistent record of connected clients. A new
* server instance discovers these and waits for previously connected
* clients to reconnect and recover their state before proceeding. If
* clients don't reconnect they are forcefully prevented from unsafely
* accessing the shared persistent storage (fenced, according to the
* rules of the platform; this could range from being powered off to having
* their switch port disabled to having their local block device set
* read-only.)
*
* The lock server doesn't respond to memory pressure. The only way
* locks are freed is if they are invalidated to null on behalf of a
@@ -74,8 +77,12 @@ struct lock_server_info {
struct super_block *sb;
spinlock_t lock;
struct mutex mutex;
struct rb_root locks_root;
struct scoutfs_spbm recovery_pending;
struct delayed_work recovery_dwork;
struct scoutfs_tseq_tree tseq_tree;
struct dentry *tseq_dentry;
@@ -423,7 +430,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
goto out;
}
/* XXX should always have a server lock here? */
/* XXX should always have a server lock here? recovery? */
snode = get_server_lock(inf, &nl->key, NULL, false);
if (!snode) {
ret = -EINVAL;
@@ -466,14 +473,18 @@ out:
* so we unlock the snode mutex.
*
* All progress must wait for all clients to finish with recovery
* because we don't know which locks they'll hold. Once recover
* finishes the server calls us to kick all the locks that were waiting
* during recovery.
* because we don't know which locks they'll hold. The unlocked
* recovery_pending test here is OK. It's filled by setup before
* anything runs. It's emptied by recovery completion. We can get a
* false nonempty result if we race with recovery completion, but that's
* OK because recovery completion processes all the locks that have
* requests after emptying, including the unlikely loser of that race.
*/
static int process_waiting_requests(struct super_block *sb,
struct server_lock_node *snode)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_net_lock_grant_response gres;
struct scoutfs_net_lock nl;
struct client_lock_entry *req;
struct client_lock_entry *req_tmp;
@@ -486,7 +497,7 @@ static int process_waiting_requests(struct super_block *sb,
/* processing waits for all invalidation responses or recovery */
if (!list_empty(&snode->invalidated) ||
scoutfs_recov_next_pending(sb, SCOUTFS_RECOV_LOCKS) != 0) {
!scoutfs_spbm_empty(&inf->recovery_pending)) {
ret = 0;
goto out;
}
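
(For illustration, not part of this patch: the unlocked emptiness test is safe because the final completion empties the set under the lock and then reprocesses every waiting lock. A minimal self-contained sketch of the pattern, where reprocess_all_waiters() is a hypothetical stand-in for the snode walk that the completion path performs:)

static void reprocess_all_waiters(void);	/* hypothetical */

static DEFINE_SPINLOCK(pending_lock);
static unsigned long pending_count;	/* filled by setup before anything runs */

static bool still_pending(void)
{
	return READ_ONCE(pending_count) != 0;	/* unlocked, may be stale */
}

static void finish_one(void)
{
	bool last;

	spin_lock(&pending_lock);
	last = (--pending_count == 0);
	spin_unlock(&pending_lock);

	/* the final completion sweeps up any reader that saw a stale result */
	if (last)
		reprocess_all_waiters();
}
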
@@ -536,8 +547,11 @@ static int process_waiting_requests(struct super_block *sb,
nl.write_version = cpu_to_le64(wv);
}
gres.nl = nl;
scoutfs_server_get_roots(sb, &gres.roots);
ret = scoutfs_server_lock_response(sb, req->rid,
req->net_id, &nl);
req->net_id, &gres);
if (ret)
goto out;
@@ -559,39 +573,89 @@ out:
return ret;
}
static void init_lock_clients_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
.sk_zone = SCOUTFS_LOCK_CLIENTS_ZONE,
.sklc_rid = cpu_to_le64(rid),
};
}
/*
* The server received a greeting from a client for the first time. If
* the client is in lock recovery then we send the initial lock request.
* If the client had already talked to the server then we must find an
* existing record for it and should begin recovery. If it doesn't have
* a record then it's timed out and we can't allow it to reconnect. If
* we're creating a new record for a client we can see EEXIST if the
* greeting is resent to a new server after the record was committed but
* before the response was received by the client.
*
* This is running in concurrent client greeting processing contexts.
*/
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid)
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
bool should_exist)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
int ret;
if (scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
init_lock_clients_key(&key, rid);
mutex_lock(&inf->mutex);
if (should_exist) {
ret = scoutfs_btree_lookup(sb, &super->lock_clients, &key,
&iref);
if (ret == 0)
scoutfs_btree_put_iref(&iref);
} else {
ret = scoutfs_btree_insert(sb, inf->alloc, inf->wri,
&super->lock_clients,
&key, NULL, 0);
if (ret == -EEXIST)
ret = 0;
}
mutex_unlock(&inf->mutex);
if (should_exist && ret == 0) {
scoutfs_key_set_zeros(&key);
ret = scoutfs_server_lock_recover_request(sb, rid, &key);
} else {
ret = 0;
if (ret)
goto out;
}
out:
return ret;
}
/*
* All clients have finished lock recovery, we can make forward progress
* on all the queued requests that were waiting on recovery.
* A client sent their last recovery response and can exit recovery. If
* they were the last client in recovery then we can process all the
* server locks that had requests.
*/
int scoutfs_lock_server_finished_recovery(struct super_block *sb)
static int finished_recovery(struct super_block *sb, u64 rid, bool cancel)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct server_lock_node *snode;
struct scoutfs_key key;
bool still_pending;
int ret = 0;
spin_lock(&inf->lock);
scoutfs_spbm_clear(&inf->recovery_pending, rid);
still_pending = !scoutfs_spbm_empty(&inf->recovery_pending);
spin_unlock(&inf->lock);
if (still_pending)
return 0;
if (cancel)
cancel_delayed_work_sync(&inf->recovery_dwork);
scoutfs_key_set_zeros(&key);
scoutfs_info(sb, "all lock clients recovered");
while ((snode = get_server_lock(inf, &key, NULL, true))) {
key = snode->key;
@@ -635,15 +699,16 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
int i;
/* client must be in recovery */
if (!scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
spin_lock(&inf->lock);
if (!scoutfs_spbm_test(&inf->recovery_pending, rid))
ret = -EINVAL;
spin_unlock(&inf->lock);
if (ret)
goto out;
}
/* client has sent us all their locks */
if (nlr->nr == 0) {
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_LOCKS);
ret = 0;
ret = finished_recovery(sb, rid, true);
goto out;
}
@@ -694,15 +759,101 @@ out:
return ret;
}
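/* client records carry no value, the rid lives in the key; a non-empty
 * value means the item is corrupt */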
static int get_rid_and_put_ref(struct scoutfs_btree_item_ref *iref, u64 *rid)
{
int ret;
if (iref->val_len == 0) {
*rid = le64_to_cpu(iref->key->sklc_rid);
ret = 0;
} else {
ret = -EIO;
}
scoutfs_btree_put_iref(iref);
return ret;
}
/*
* This work executes if enough time passes without all of the clients
* finishing with recovery and canceling the work. We walk through the
* client records and find any that still have their recovery pending.
*/
static void scoutfs_lock_server_recovery_timeout(struct work_struct *work)
{
struct lock_server_info *inf = container_of(work,
struct lock_server_info,
recovery_dwork.work);
struct super_block *sb = inf->sb;
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
bool timed_out;
u64 rid;
int ret;
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
/* we enter recovery if there are any client records */
for (rid = 0; ; rid++) {
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
if (ret == -ENOENT) {
ret = 0;
break;
}
if (ret == 0)
ret = get_rid_and_put_ref(&iref, &rid);
if (ret < 0)
break;
spin_lock(&inf->lock);
if (scoutfs_spbm_test(&inf->recovery_pending, rid)) {
scoutfs_spbm_clear(&inf->recovery_pending, rid);
timed_out = true;
} else {
timed_out = false;
}
spin_unlock(&inf->lock);
if (!timed_out)
continue;
scoutfs_err(sb, "client rid %016llx lock recovery timed out",
rid);
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
&super->lock_clients, &key);
if (ret)
break;
}
ret = scoutfs_server_apply_commit(sb, ret);
out:
/* force processing all pending lock requests */
if (ret == 0)
ret = finished_recovery(sb, 0, false);
if (ret < 0) {
scoutfs_err(sb, "lock server saw err %d while timing out clients, shutting down", ret);
scoutfs_server_abort(sb);
}
}
/*
* A client is leaving the lock service. They aren't using locks and
* won't send any more requests. We tear down all the state we had for
* them. This can be called multiple times for a given client as their
* farewell is resent to new servers. It's OK to not find any state.
* If we fail to delete a persistent entry then we have to shut down and
* hope that the next server has more luck.
*/
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct client_lock_entry *clent;
struct client_lock_entry *tmp;
struct server_lock_node *snode;
@@ -711,7 +862,20 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
bool freed;
int ret = 0;
mutex_lock(&inf->mutex);
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
&super->lock_clients, &key);
mutex_unlock(&inf->mutex);
if (ret == -ENOENT) {
ret = 0;
goto out;
}
if (ret < 0)
goto out;
scoutfs_key_set_zeros(&key);
while ((snode = get_server_lock(inf, &key, NULL, true))) {
freed = false;
@@ -796,14 +960,23 @@ static void lock_server_tseq_show(struct seq_file *m,
/*
* Setup the lock server. This is called before networking can deliver
* requests.
* requests. If we find existing client records then we enter recovery.
* Lock request processing is deferred until recovery is resolved for
* all the existing clients: either they reconnect and replay locks or
* we time them out.
*/
int scoutfs_lock_server_setup(struct super_block *sb,
struct scoutfs_alloc *alloc,
struct scoutfs_block_writer *wri, u64 max_vers)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct lock_server_info *inf;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
unsigned int nr;
u64 rid;
int ret;
inf = kzalloc(sizeof(struct lock_server_info), GFP_KERNEL);
if (!inf)
@@ -811,7 +984,11 @@ int scoutfs_lock_server_setup(struct super_block *sb,
inf->sb = sb;
spin_lock_init(&inf->lock);
mutex_init(&inf->mutex);
inf->locks_root = RB_ROOT;
scoutfs_spbm_init(&inf->recovery_pending);
INIT_DELAYED_WORK(&inf->recovery_dwork,
scoutfs_lock_server_recovery_timeout);
scoutfs_tseq_tree_init(&inf->tseq_tree, lock_server_tseq_show);
inf->alloc = alloc;
inf->wri = wri;
@@ -826,7 +1003,36 @@ int scoutfs_lock_server_setup(struct super_block *sb,
sbi->lock_server_info = inf;
return 0;
/* we enter recovery if there are any client records */
nr = 0;
for (rid = 0; ; rid++) {
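		/* each pass restarts the search just past the last rid found */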
init_lock_clients_key(&key, rid);
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
if (ret == -ENOENT)
break;
if (ret == 0)
ret = get_rid_and_put_ref(&iref, &rid);
if (ret < 0)
goto out;
ret = scoutfs_spbm_set(&inf->recovery_pending, rid);
if (ret)
goto out;
nr++;
if (rid == U64_MAX)
break;
}
ret = 0;
if (nr) {
schedule_delayed_work(&inf->recovery_dwork,
msecs_to_jiffies(LOCK_SERVER_RECOVERY_MS));
scoutfs_info(sb, "waiting for %u lock clients to recover", nr);
}
out:
return ret;
}
/*
@@ -844,6 +1050,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
LIST_HEAD(list);
if (inf) {
cancel_delayed_work_sync(&inf->recovery_dwork);
debugfs_remove(inf->tseq_dentry);
rbtree_postorder_for_each_entry_safe(snode, stmp,
@@ -862,6 +1070,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
kfree(snode);
}
scoutfs_spbm_destroy(&inf->recovery_pending);
kfree(inf);
sbi->lock_server_info = NULL;
}

View File

@@ -3,10 +3,10 @@
int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock_recover *nlr);
int scoutfs_lock_server_finished_recovery(struct super_block *sb);
int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
u64 net_id, struct scoutfs_net_lock *nl);
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid);
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
bool should_exist);
int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid);

View File

@@ -944,6 +944,7 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
struct scoutfs_net_connection *acc_conn;
DECLARE_WAIT_QUEUE_HEAD(waitq);
struct socket *acc_sock;
LIST_HEAD(conn_list);
int ret;
trace_scoutfs_net_listen_work_enter(sb, 0, 0);

File diff suppressed because it is too large

View File

@@ -1,24 +0,0 @@
#ifndef _SCOUTFS_OMAP_H_
#define _SCOUTFS_OMAP_H_
int scoutfs_omap_inc(struct super_block *sb, u64 ino);
void scoutfs_omap_dec(struct super_block *sb, u64 ino);
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
struct scoutfs_lock **lock_ret);
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata);
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
struct scoutfs_open_ino_map_args *args);
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_finished_recovery(struct super_block *sb);
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_open_ino_map_args *args);
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
struct scoutfs_open_ino_map *resp_map);
void scoutfs_omap_server_shutdown(struct super_block *sb);
int scoutfs_omap_setup(struct super_block *sb);
void scoutfs_omap_destroy(struct super_block *sb);
#endif

View File

@@ -1,280 +0,0 @@
/*
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include "super.h"
#include "recov.h"
/*
* There are a few server messages which can't be processed until they
* know that they have state for all possibly active clients. These
* little helpers track which clients have recovered what state and give
* those message handlers a call to check if recovery has completed. We
* track the timeout here, but all we do is call back into the server to
* take steps to evict timed out clients and then let us know that their
* recovery has finished.
*/
struct recov_info {
struct super_block *sb;
spinlock_t lock;
struct list_head pending;
struct timer_list timer;
void (*timeout_fn)(struct super_block *);
};
#define DECLARE_RECOV_INFO(sb, name) \
struct recov_info *name = SCOUTFS_SB(sb)->recov_info
struct recov_pending {
struct list_head head;
u64 rid;
int which;
};
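/* rid == 0 acts as a wildcard that matches any client's entry */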
static struct recov_pending *find_pending(struct recov_info *recinf, u64 rid, int which)
{
struct recov_pending *pend;
list_for_each_entry(pend, &recinf->pending, head) {
if ((rid == 0 || pend->rid == rid) && (pend->which & which))
return pend;
}
return NULL;
}
/*
* Record that we'll be waiting for a client to recover something.
* _finish will eventually be called for every _prepare, either
* because recovery naturally finished or because it timed out and the
* server evicted the client.
*/
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *alloc;
struct recov_pending *pend;
if (WARN_ON_ONCE(which & SCOUTFS_RECOV_INVALID))
return -EINVAL;
alloc = kmalloc(sizeof(*pend), GFP_NOFS);
if (!alloc)
return -ENOMEM;
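	/* allocated before taking the lock; if an existing entry absorbs the
	 * bits the unused allocation is freed below, otherwise swap()
	 * consumes it and the kfree() of NULL is a no-op */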
spin_lock(&recinf->lock);
pend = find_pending(recinf, rid, SCOUTFS_RECOV_ALL);
if (pend) {
pend->which |= which;
} else {
swap(pend, alloc);
pend->rid = rid;
pend->which = which;
list_add(&pend->head, &recinf->pending);
}
spin_unlock(&recinf->lock);
kfree(alloc);
return 0;
}
/*
* Recovery is only finished once we've begun (which sets the timer) and
* all clients have finished. If we didn't test the timer we could
* claim it finished prematurely as clients are being prepared.
*/
static int recov_finished(struct recov_info *recinf)
{
return !!(recinf->timeout_fn != NULL && list_empty(&recinf->pending));
}
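/* runs in timer context, so timeout_fn must not sleep; the server's
 * callback just queues work */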
static void timer_callback(struct timer_list *timer)
{
struct recov_info *recinf = from_timer(recinf, timer, timer);
recinf->timeout_fn(recinf->sb);
}
/*
* Begin waiting for recovery once we've prepared all the clients. If
* the timeout period elapses before _finish is called on all prepared
* clients then the timer will call the callback.
*
* Returns > 0 if all the prepared clients finish recovery before begin
* is called.
*/
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
unsigned int timeout_ms)
{
DECLARE_RECOV_INFO(sb, recinf);
int ret;
spin_lock(&recinf->lock);
recinf->timeout_fn = timeout_fn;
recinf->timer.expires = jiffies + msecs_to_jiffies(timeout_ms);
add_timer(&recinf->timer);
ret = recov_finished(recinf);
spin_unlock(&recinf->lock);
if (ret > 0)
del_timer_sync(&recinf->timer);
return ret;
}
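
(A usage sketch of the prepare/begin contract, mirroring the server's start_recovery() later in this change; recovery_timeout(), finished_recovery(), and SERVER_RECOV_TIMEOUT_MS are the caller's, as defined in server.c:)

static int wait_for_clients(struct super_block *sb, u64 *rids, int nr)
{
	int ret = 0;
	int i;

	/* prepare every known client before the timer starts */
	for (i = 0; i < nr; i++) {
		ret = scoutfs_recov_prepare(sb, rids[i], SCOUTFS_RECOV_ALL);
		if (ret < 0)
			return ret;
	}

	if (nr > 0) {
		ret = scoutfs_recov_begin(sb, recovery_timeout,
					  SERVER_RECOV_TIMEOUT_MS);
		if (ret > 0) {
			/* everyone finished between prepare and begin */
			finished_recovery(sb);
			ret = 0;
		}
	}

	return ret;
}
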
/*
* A given client has recovered the given state. If it's finished all
* recovery then we free it, and if all clients have finished recovery
* then we cancel the timeout timer.
*
* Returns > 0 if _begin has been called and all clients have finished.
* The caller will only see > 0 returned once.
*/
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
int ret = 0;
spin_lock(&recinf->lock);
pend = find_pending(recinf, rid, which);
if (pend) {
pend->which &= ~which;
if (pend->which) {
pend = NULL;
} else {
list_del(&pend->head);
ret = recov_finished(recinf);
}
}
spin_unlock(&recinf->lock);
if (ret > 0)
del_timer_sync(&recinf->timer);
kfree(pend);
return ret;
}
/*
* Returns true if the given client is still trying to recover
* the given state.
*/
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
bool is_pending;
spin_lock(&recinf->lock);
is_pending = find_pending(recinf, rid, which) != NULL;
spin_unlock(&recinf->lock);
return is_pending;
}
/*
* Returns 0 if there are no rids waiting for the given state to be
* recovered. Returns the rid of a client still waiting if there are
* any, in no specified order.
*
* This is inherently racy. Callers are responsible for reconciling any
* actions taken on a pending result with recovery finishing, perhaps
* before we return.
*/
u64 scoutfs_recov_next_pending(struct super_block *sb, int which)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
u64 rid;
spin_lock(&recinf->lock);
pend = find_pending(recinf, 0, which);
rid = pend ? pend->rid : 0;
spin_unlock(&recinf->lock);
return rid;
}
/*
* The server is shutting down and doesn't need to worry about recovery
* anymore. It'll be built up again by the next server, if needed.
*/
void scoutfs_recov_shutdown(struct super_block *sb)
{
DECLARE_RECOV_INFO(sb, recinf);
struct recov_pending *pend;
struct recov_pending *tmp;
LIST_HEAD(list);
del_timer_sync(&recinf->timer);
spin_lock(&recinf->lock);
list_splice_init(&recinf->pending, &list);
recinf->timeout_fn = NULL;
spin_unlock(&recinf->lock);
list_for_each_entry_safe(pend, tmp, &list, head) {
list_del(&pend->head);
kfree(pend);
}
}
int scoutfs_recov_setup(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct recov_info *recinf;
int ret;
recinf = kzalloc(sizeof(struct recov_info), GFP_KERNEL);
if (!recinf) {
ret = -ENOMEM;
goto out;
}
recinf->sb = sb;
spin_lock_init(&recinf->lock);
INIT_LIST_HEAD(&recinf->pending);
timer_setup(&recinf->timer, timer_callback, 0);
sbi->recov_info = recinf;
ret = 0;
out:
return ret;
}
void scoutfs_recov_destroy(struct super_block *sb)
{
DECLARE_RECOV_INFO(sb, recinf);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
if (recinf) {
scoutfs_recov_shutdown(sb);
kfree(recinf);
sbi->recov_info = NULL;
}
}

View File

@@ -1,23 +0,0 @@
#ifndef _SCOUTFS_RECOV_H_
#define _SCOUTFS_RECOV_H_
enum {
SCOUTFS_RECOV_GREETING = ( 1 << 0),
SCOUTFS_RECOV_LOCKS = ( 1 << 1),
SCOUTFS_RECOV_INVALID = (~0 << 2),
SCOUTFS_RECOV_ALL = (~SCOUTFS_RECOV_INVALID),
};
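/*
 * (~0 << 2) is every bit above the two defined flags, so _ALL, its
 * complement, is exactly the mask of valid recovery bits.
 */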
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which);
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
unsigned int timeout_ms);
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which);
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, int which);
void scoutfs_recov_shutdown(struct super_block *sb);
int scoutfs_recov_setup(struct super_block *sb);
void scoutfs_recov_destroy(struct super_block *sb);
#endif

View File

@@ -690,16 +690,15 @@ TRACE_EVENT(scoutfs_evict_inode,
TRACE_EVENT(scoutfs_drop_inode,
TP_PROTO(struct super_block *sb, __u64 ino, unsigned int nlink,
unsigned int unhashed, bool drop_invalidated),
unsigned int unhashed),
TP_ARGS(sb, ino, nlink, unhashed, drop_invalidated),
TP_ARGS(sb, ino, nlink, unhashed),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(unsigned int, nlink)
__field(unsigned int, unhashed)
__field(unsigned int, drop_invalidated)
),
TP_fast_assign(
@@ -707,12 +706,10 @@ TRACE_EVENT(scoutfs_drop_inode,
__entry->ino = ino;
__entry->nlink = nlink;
__entry->unhashed = unhashed;
__entry->drop_invalidated = !!drop_invalidated;
),
TP_printk(SCSBF" ino %llu nlink %u unhashed %d drop_invalidated %u", SCSB_TRACE_ARGS,
__entry->ino, __entry->nlink, __entry->unhashed,
__entry->drop_invalidated)
TP_printk(SCSBF" ino %llu nlink %u unhashed %d", SCSB_TRACE_ARGS,
__entry->ino, __entry->nlink, __entry->unhashed)
);
TRACE_EVENT(scoutfs_inode_walk_writeback,
@@ -2405,89 +2402,6 @@ TRACE_EVENT(scoutfs_item_invalidate_page,
sk_trace_args(pg_start), sk_trace_args(pg_end), __entry->pgi)
);
DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(void *, grp)
__field(__u64, group_nr)
__field(unsigned int, group_total)
__field(int, bit_nr)
__field(int, bit_count)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->grp = grp;
__entry->group_nr = group_nr;
__entry->group_total = group_total;
__entry->bit_nr = bit_nr;
__entry->bit_count = bit_count;
),
TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d bit_count %d",
SCSB_TRACE_ARGS, __entry->grp, __entry->group_nr, __entry->group_total,
__entry->bit_nr, __entry->bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_alloc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_free,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_inc,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_dec,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_request,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_destroy,
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
int bit_nr, int bit_count),
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
);
TRACE_EVENT(scoutfs_omap_should_delete,
TP_PROTO(struct super_block *sb, u64 ino, unsigned int nlink, int ret),
TP_ARGS(sb, ino, nlink, ret),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(unsigned int, nlink)
__field(int, ret)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->nlink = nlink;
__entry->ret = ret;
),
TP_printk(SCSBF" ino %llu nlink %u ret %d",
SCSB_TRACE_ARGS, __entry->ino, __entry->nlink, __entry->ret)
);
#endif /* _TRACE_SCOUTFS_H */
/* This part must be outside protection */

View File

@@ -38,8 +38,6 @@
#include "srch.h"
#include "alloc.h"
#include "forest.h"
#include "recov.h"
#include "omap.h"
/*
* Every active mount can act as the server that listens on a net
@@ -98,9 +96,6 @@ struct server_info {
/* stable versions stored from commits, given in locks and rpcs */
seqcount_t roots_seqcount;
struct scoutfs_net_roots roots;
/* recovery timeout fences from work */
struct work_struct fence_pending_recov_work;
};
#define DECLARE_SERVER_INFO(sb, name) \
@@ -187,7 +182,7 @@ int scoutfs_server_apply_commit(struct super_block *sb, int err)
return err;
}
static void get_roots(struct super_block *sb,
void scoutfs_server_get_roots(struct super_block *sb,
struct scoutfs_net_roots *roots)
{
DECLARE_SERVER_INFO(sb, server);
@@ -561,7 +556,7 @@ static int server_get_roots(struct super_block *sb,
memset(&roots, 0, sizeof(roots));
ret = -EINVAL;
} else {
get_roots(sb, &roots);
scoutfs_server_get_roots(sb, &roots);
ret = 0;
}
@@ -867,13 +862,13 @@ int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
}
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_net_lock *nl)
struct scoutfs_net_lock_grant_response *gr)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
return scoutfs_net_response_node(sb, server->conn, rid,
SCOUTFS_NET_CMD_LOCK, id, 0,
nl, sizeof(*nl));
gr, sizeof(*gr));
}
static bool invalid_recover(struct scoutfs_net_lock_recover *nlr,
@@ -1021,60 +1016,6 @@ out:
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
}
/* The server is receiving an omap response from the client */
static int open_ino_map_response(struct super_block *sb, struct scoutfs_net_connection *conn,
void *resp, unsigned int resp_len, int error, void *data)
{
u64 rid = scoutfs_net_client_rid(conn);
if (resp_len != sizeof(struct scoutfs_open_ino_map))
return -EINVAL;
return scoutfs_omap_server_handle_response(sb, rid, resp);
}
/* The server is sending an omap request to the client */
int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
struct scoutfs_open_ino_map_args *args)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
return scoutfs_net_submit_request_node(sb, server->conn, rid, SCOUTFS_NET_CMD_OPEN_INO_MAP,
args, sizeof(*args),
open_ino_map_response, NULL, NULL);
}
/* The server is sending an omap response to the client */
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_open_ino_map *map, int err)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
return scoutfs_net_response_node(sb, server->conn, rid,
SCOUTFS_NET_CMD_OPEN_INO_MAP, id, err,
map, sizeof(*map));
}
/* The server is receiving an omap request from the client */
static int server_open_ino_map(struct super_block *sb, struct scoutfs_net_connection *conn,
u8 cmd, u64 id, void *arg, u16 arg_len)
{
u64 rid = scoutfs_net_client_rid(conn);
int ret;
if (arg_len != sizeof(struct scoutfs_open_ino_map_args)) {
ret = -EINVAL;
goto out;
}
ret = scoutfs_omap_server_handle_request(sb, rid, id, arg);
out:
if (ret < 0)
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
return 0;
}
static void init_mounted_client_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
@@ -1257,13 +1198,8 @@ static int server_greeting(struct super_block *sb,
ret = scoutfs_server_apply_commit(sb, ret);
queue_work(server->wq, &server->farewell_work);
if (ret < 0)
goto send_err;
}
scoutfs_server_recov_finish(sb, le64_to_cpu(gr->rid), SCOUTFS_RECOV_GREETING);
ret = 0;
send_err:
err = ret;
@@ -1292,10 +1228,17 @@ send_err:
scoutfs_net_server_greeting(sb, conn, le64_to_cpu(gr->rid), id,
reconnecting, first_contact, farewell);
/* let layers know we have a client connecting for the first time */
/* lock server might send recovery request */
if (le64_to_cpu(gr->server_term) != server->term) {
ret = scoutfs_lock_server_greeting(sb, le64_to_cpu(gr->rid)) ?:
scoutfs_omap_add_rid(sb, le64_to_cpu(gr->rid));
/* we're now doing two commits per greeting, not great */
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
ret = scoutfs_lock_server_greeting(sb, le64_to_cpu(gr->rid),
gr->server_term != 0);
ret = scoutfs_server_apply_commit(sb, ret);
if (ret)
goto out;
}
@@ -1316,25 +1259,6 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
sizeof(struct scoutfs_mounted_client_btree_val));
}
static int reclaim_rid(struct super_block *sb, u64 rid)
{
int ret;
ret = scoutfs_server_hold_commit(sb);
if (ret < 0)
return ret;
/* delete mounted client last, client reconnect looks for it */
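	/* gcc's a ?: b evaluates b only when a is zero, so each step here
	 * runs until one of them returns an error */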
ret = scoutfs_lock_server_farewell(sb, rid) ?:
remove_trans_seq(sb, rid) ?:
reclaim_log_trees(sb, rid) ?:
cancel_srch_compact(sb, rid) ?:
delete_mounted_client(sb, rid) ?:
scoutfs_omap_remove_rid(sb, rid);
return scoutfs_server_apply_commit(sb, ret);
}
/*
* This work processes farewell requests asynchronously. Requests from
* quorum members can be held until only the final majority remains and
@@ -1462,7 +1386,18 @@ static void farewell_worker(struct work_struct *work)
/* process and send farewell responses */
list_for_each_entry_safe(fw, tmp, &send, entry) {
ret = reclaim_rid(sb, fw->rid);
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
/* delete mounted client last, client reconnect looks for it */
ret = scoutfs_lock_server_farewell(sb, fw->rid) ?:
remove_trans_seq(sb, fw->rid) ?:
reclaim_log_trees(sb, fw->rid) ?:
cancel_srch_compact(sb, fw->rid) ?:
delete_mounted_client(sb, fw->rid);
ret = scoutfs_server_apply_commit(sb, ret);
if (ret)
goto out;
}
@@ -1564,7 +1499,6 @@ static scoutfs_net_request_t server_req_funcs[] = {
[SCOUTFS_NET_CMD_LOCK] = server_lock,
[SCOUTFS_NET_CMD_SRCH_GET_COMPACT] = server_srch_get_compact,
[SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT] = server_srch_commit_compact,
[SCOUTFS_NET_CMD_OPEN_INO_MAP] = server_open_ino_map,
[SCOUTFS_NET_CMD_FAREWELL] = server_farewell,
};
@@ -1606,143 +1540,6 @@ static void server_notify_down(struct super_block *sb,
}
}
/*
* All clients have recovered all state. Now we can kick all the work
* that was waiting on recovery.
*
* It's a bit of a false dependency to have all work wait for completion
* before any work can make progress, but recovery is naturally
* concerned about in-memory state. It should all be quick to recover
* once a client arrives.
*/
static void finished_recovery(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);
int ret = 0;
scoutfs_info(sb, "all clients recovered");
ret = scoutfs_omap_finished_recovery(sb) ?:
scoutfs_lock_server_finished_recovery(sb);
if (ret < 0) {
scoutfs_err(sb, "error %d resuming after recovery finished, shutting down", ret);
stop_server(server);
}
}
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which)
{
if (scoutfs_recov_finish(sb, rid, which) > 0)
finished_recovery(sb);
}
/*
* If the recovery timeout is too short we'll prematurely evict mounts
* that would have recovered. They need time to have their sockets
* timeout, reconnect to the current server, and fully recover their
* state.
*
* If it's too long we'll needlessly delay resuming operations when
* clients have crashed and will never recover.
*/
#define SERVER_RECOV_TIMEOUT_MS (30 * MSEC_PER_SEC)
/*
* Not all clients recovered in time. We fence them and reclaim
* whatever resources they were using. If we see a rid here then we're
* going to fence it, regardless of whether it manages to finish recovery
* while we're fencing it.
*/
static void fence_pending_recov_worker(struct work_struct *work)
{
struct server_info *server = container_of(work, struct server_info,
fence_pending_recov_work);
struct super_block *sb = server->sb;
u64 rid;
int ret;
while ((rid = scoutfs_recov_next_pending(sb, SCOUTFS_RECOV_ALL)) > 0) {
scoutfs_err(sb, "%lu ms recovery timeout expired for client rid %016llx, fencing",
SERVER_RECOV_TIMEOUT_MS, rid);
ret = reclaim_rid(sb, rid);
if (ret < 0) {
scoutfs_err(sb, "error %d reclaiming rid %016llx, shutting down", ret, rid);
stop_server(server);
break;
}
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_ALL);
}
}
static void recovery_timeout(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);
if (!server->shutting_down)
queue_work(server->wq, &server->fence_pending_recov_work);
}
/*
* As the server starts up it needs to start waiting for recovery from
* any clients which were previously still mounted in the last running
* server. This is done before networking is started so we won't
* receive any messages from clients until we've prepared them all. If
* the clients don't recover in time then they'll be fenced.
*/
static int start_recovery(struct super_block *sb)
{
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key key;
unsigned int nr = 0;
u64 rid;
int ret;
for (rid = 0; ; rid++) {
init_mounted_client_key(&key, rid);
ret = scoutfs_btree_next(sb, &super->mounted_clients, &key, &iref);
if (ret == -ENOENT) {
ret = 0;
break;
}
if (ret == 0) {
rid = le64_to_cpu(iref.key->skmc_rid);
scoutfs_btree_put_iref(&iref);
}
if (ret < 0)
goto out;
ret = scoutfs_recov_prepare(sb, rid, SCOUTFS_RECOV_ALL);
if (ret < 0) {
scoutfs_err(sb, "error %d preparing recovery for client rid %016llx, shutting down",
ret, rid);
goto out;
}
nr++;
}
if (nr > 0) {
scoutfs_info(sb, "waiting for %u clients to recover", nr);
ret = scoutfs_recov_begin(sb, recovery_timeout, SERVER_RECOV_TIMEOUT_MS);
if (ret > 0) {
finished_recovery(sb);
ret = 0;
}
}
out:
if (ret < 0) {
scoutfs_err(sb, "error %d starting recovery, shutting down", ret);
stop_server(server);
}
return ret;
}
static void scoutfs_server_worker(struct work_struct *work)
{
struct server_info *server = container_of(work, struct server_info,
@@ -1754,6 +1551,7 @@ static void scoutfs_server_worker(struct work_struct *work)
struct scoutfs_net_connection *conn = NULL;
DECLARE_WAIT_QUEUE_HEAD(waitq);
struct sockaddr_in sin;
LIST_HEAD(conn_list);
u64 max_vers;
int ret;
@@ -1813,8 +1611,8 @@ static void scoutfs_server_worker(struct work_struct *work)
goto shutdown;
}
ret = scoutfs_lock_server_setup(sb, &server->alloc, &server->wri, max_vers) ?:
start_recovery(sb);
ret = scoutfs_lock_server_setup(sb, &server->alloc, &server->wri,
max_vers);
if (ret)
goto shutdown;
@@ -1838,15 +1636,10 @@ shutdown:
scoutfs_net_shutdown(sb, conn);
server->conn = NULL;
/* stop tracking recovery, cancel timer, flush any fencing */
scoutfs_recov_shutdown(sb);
flush_work(&server->fence_pending_recov_work);
/* wait for extra queues by requests, won't find waiters */
flush_work(&server->commit_work);
scoutfs_lock_server_destroy(sb);
scoutfs_omap_server_shutdown(sb);
out:
scoutfs_net_free_conn(sb, conn);
@@ -1932,7 +1725,6 @@ int scoutfs_server_setup(struct super_block *sb)
mutex_init(&server->srch_mutex);
mutex_init(&server->mounted_clients_mutex);
seqcount_init(&server->roots_seqcount);
INIT_WORK(&server->fence_pending_recov_work, fence_pending_recov_worker);
server->wq = alloc_workqueue("scoutfs_server",
WQ_UNBOUND | WQ_NON_REENTRANT, 0);

View File

@@ -59,17 +59,13 @@ do { \
int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_net_lock *nl);
struct scoutfs_net_lock_grant_response *gr);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
struct scoutfs_key *key);
void scoutfs_server_get_roots(struct super_block *sb,
struct scoutfs_net_roots *roots);
int scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
struct scoutfs_open_ino_map_args *args);
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
struct scoutfs_open_ino_map *map, int err);
struct sockaddr_in;
struct scoutfs_quorum_elected_info;

View File

@@ -2156,8 +2156,7 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
if (ret < 0)
goto commit;
ret = scoutfs_alloc_prepare_commit(sb, &alloc, &wri) ?:
scoutfs_block_writer_write(sb, &wri);
ret = scoutfs_block_writer_write(sb, &wri);
commit:
/* the server won't use our partial compact if _ERROR is set */
sc->meta_avail = alloc.avail;

View File

@@ -44,8 +44,6 @@
#include "srch.h"
#include "item.h"
#include "alloc.h"
#include "recov.h"
#include "omap.h"
#include "scoutfs_trace.h"
static struct dentry *scoutfs_debugfs_root;
@@ -168,7 +166,7 @@ out:
* try to free as many locks as possible.
*/
if (scoutfs_trigger(sb, STATFS_LOCK_PURGE))
scoutfs_free_unused_locks(sb);
scoutfs_free_unused_locks(sb, -1UL);
return ret;
}
@@ -245,26 +243,25 @@ static void scoutfs_put_super(struct super_block *sb)
trace_scoutfs_put_super(sb);
sbi->shutdown = true;
scoutfs_data_destroy(sb);
scoutfs_srch_destroy(sb);
scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
sbi->rid_lock = NULL;
scoutfs_lock_shutdown(sb);
scoutfs_shutdown_trans(sb);
scoutfs_client_destroy(sb);
scoutfs_inode_destroy(sb);
scoutfs_item_destroy(sb);
scoutfs_forest_destroy(sb);
scoutfs_data_destroy(sb);
scoutfs_quorum_destroy(sb);
scoutfs_lock_shutdown(sb);
scoutfs_server_destroy(sb);
scoutfs_recov_destroy(sb);
scoutfs_net_destroy(sb);
scoutfs_lock_destroy(sb);
scoutfs_omap_destroy(sb);
scoutfs_block_destroy(sb);
scoutfs_destroy_triggers(sb);
@@ -594,10 +591,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
scoutfs_inode_setup(sb) ?:
scoutfs_data_setup(sb) ?:
scoutfs_setup_trans(sb) ?:
scoutfs_omap_setup(sb) ?:
scoutfs_lock_setup(sb) ?:
scoutfs_net_setup(sb) ?:
scoutfs_recov_setup(sb) ?:
scoutfs_server_setup(sb) ?:
scoutfs_quorum_setup(sb) ?:
scoutfs_client_setup(sb) ?:
@@ -648,9 +643,6 @@ static void scoutfs_kill_sb(struct super_block *sb)
{
trace_scoutfs_kill_sb(sb);
if (SCOUTFS_HAS_SBI(sb))
scoutfs_lock_unmount_begin(sb);
kill_block_super(sb);
}

View File

@@ -26,8 +26,6 @@ struct net_info;
struct block_info;
struct forest_info;
struct srch_info;
struct recov_info;
struct omap_info;
struct scoutfs_sb_info {
struct super_block *sb;
@@ -50,7 +48,6 @@ struct scoutfs_sb_info {
struct block_info *block_info;
struct forest_info *forest_info;
struct srch_info *srch_info;
struct omap_info *omap_info;
struct item_cache_info *item_cache_info;
wait_queue_head_t trans_hold_wq;
@@ -73,7 +70,6 @@ struct scoutfs_sb_info {
struct lock_server_info *lock_server_info;
struct client_info *client_info;
struct server_info *server_info;
struct recov_info *recov_info;
struct sysfs_info *sfsinfo;
struct scoutfs_counters *counters;
@@ -85,6 +81,8 @@ struct scoutfs_sb_info {
struct dentry *debug_root;
bool shutdown;
unsigned long corruption_messages_once[SC_NR_LONGS];
};

View File

@@ -564,15 +564,8 @@ int scoutfs_setup_trans(struct super_block *sb)
}
/*
* While the vfs will have done an fs level sync before calling
* put_super, we may have done work down in our level after all the fs
* ops were done. An example is final inode deletion in iput, that's
* done in generic_shutdown_super after the sync and before calling our
* put_super.
*
* So we always try to write any remaining dirty transactions before
* shutting down. Typically there won't be any dirty data and the
* worker will just return.
* kill_sb calls sync before getting here so we know that dirty data
* should be in flight. We just have to wait for it to quiesce.
*/
void scoutfs_shutdown_trans(struct super_block *sb)
{
@@ -580,18 +573,13 @@ void scoutfs_shutdown_trans(struct super_block *sb)
DECLARE_TRANS_INFO(sb, tri);
if (tri) {
scoutfs_block_writer_forget_all(sb, &tri->wri);
if (sbi->trans_write_workq) {
/* immediately queues pending timer */
flush_delayed_work(&sbi->trans_write_work);
/* prevents re-arming if it has to wait */
cancel_delayed_work_sync(&sbi->trans_write_work);
destroy_workqueue(sbi->trans_write_workq);
/* trans work scheduled after shutdown sees NULL */
sbi->trans_write_workq = NULL;
}
scoutfs_block_writer_forget_all(sb, &tri->wri);
kfree(tri);
sbi->trans_info = NULL;
}

View File

@@ -1,4 +1,4 @@
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -I ../kmod/src
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing
SHELL := /usr/bin/bash
# each binary command is built from a single .c file
@@ -6,7 +6,6 @@ BIN := src/createmany \
src/dumb_setxattr \
src/handle_cat \
src/bulk_create_paths \
src/stage_tmpfile \
src/find_xattrs
DEPS := $(wildcard src/*.d)

View File

@@ -52,8 +52,8 @@ t_filter_dmesg()
# tests that drop unmount io triggers fencing
re="$re|scoutfs .* error: fencing "
re="$re|scoutfs .*: waiting for .* clients"
re="$re|scoutfs .*: all clients recovered"
re="$re|scoutfs .*: waiting for .* lock clients"
re="$re|scoutfs .*: all lock clients recovered"
re="$re|scoutfs .* error: client rid.*lock recovery timed out"
# some tests mount w/o options

View File

@@ -129,7 +129,7 @@ t_umount()
test "$nr" -lt "$T_NR_MOUNTS" || \
t_fail "fs nr $nr invalid"
eval t_quiet umount \$T_M$nr
eval t_quiet umount \$T_M$i
}
#

View File

@@ -1,27 +0,0 @@
== basic unlink deletes
ino found in dseq index
ino not found in dseq index
== local open-unlink waits for close to delete
contents after rm: contents
ino found in dseq index
ino not found in dseq index
== multiple local opens are protected
contents after rm 1: contents
contents after rm 2: contents
ino found in dseq index
ino not found in dseq index
== remote unopened unlink deletes
ino not found in dseq index
ino not found in dseq index
== unlink wait for open on other mount
mount 0 contents after mount 1 rm: contents
ino found in dseq index
ino found in dseq index
stat: cannot stat /mnt/test/test/inode-deletion/file: No such file or directory
ino not found in dseq index
ino not found in dseq index
== lots of deletions use one open map
== open files survive remote scanning orphans
mount 0 contents after mount 1 remounted: contents
ino not found in dseq index
ino not found in dseq index

View File

@@ -1,18 +0,0 @@
total file size 33669120
00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 |AAAAAAAAAAAAAAAA|
*
00400000 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 42 |BBBBBBBBBBBBBBBB|
*
00801000 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 43 |CCCCCCCCCCCCCCCC|
*
00c03000 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 44 |DDDDDDDDDDDDDDDD|
*
01006000 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 45 |EEEEEEEEEEEEEEEE|
*
0140a000 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 |FFFFFFFFFFFFFFFF|
*
0180f000 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 |GGGGGGGGGGGGGGGG|
*
01c15000 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 48 |HHHHHHHHHHHHHHHH|
*
0201c000

View File

@@ -1,7 +1,6 @@
Ran:
generic/001
generic/002
generic/004
generic/005
generic/006
generic/007
@@ -74,6 +73,7 @@ generic/376
generic/377
Not
run:
generic/004
generic/008
generic/009
generic/012
@@ -278,4 +278,4 @@ shared/004
shared/032
shared/051
shared/289
Passed all 73 tests
Passed all 72 tests

View File

@@ -13,13 +13,11 @@ lock-refleak.sh
lock-shrink-consistency.sh
lock-pr-cw-conflict.sh
lock-revoke-getcwd.sh
export-lookup-evict-race.sh
createmany-parallel.sh
createmany-large-names.sh
createmany-rename-large-dir.sh
stage-release-race-alloc.sh
stage-multi-part.sh
stage-tmpfile.sh
basic-posix-consistency.sh
dirent-consistency.sh
lock-ex-race-processes.sh
@@ -31,5 +29,4 @@ mount-unmount-race.sh
createmany-parallel-mounts.sh
archive-light-cycle.sh
block-stale-reads.sh
inode-deletion.sh
xfstests.sh

View File

@@ -1,145 +0,0 @@
/*
* Exercise O_TMPFILE creation as well as staging from tmpfiles into
* a released destination file.
*
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <errno.h>
#include <linux/types.h>
#include <assert.h>
#include "ioctl.h"
#define array_size(arr) (sizeof(arr) / sizeof(arr[0]))
/*
* Write known data into 8 tmpfiles.
* Make a new file X and release it
* Move contents of 8 tmpfiles into X.
*/
struct sub_tmp_info {
int fd;
unsigned int offset;
unsigned int length;
};
#define SZ 4096
char buf[SZ];
int main(int argc, char **argv)
{
struct scoutfs_ioctl_release ioctl_args = {0};
struct scoutfs_ioctl_move_blocks mb;
struct sub_tmp_info sub_tmps[8];
int tot_size = 0;
char *dest_file;
int dest_fd;
char *mnt;
int ret;
int i;
if (argc < 3) {
printf("%s <mountpoint> <dest_file>\n", argv[0]);
return 1;
}
mnt = argv[1];
dest_file = argv[2];
for (i = 0; i < array_size(sub_tmps); i++) {
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
int remaining;
sub_tmp->fd = open(mnt, O_RDWR | O_TMPFILE, S_IRUSR | S_IWUSR);
if (sub_tmp->fd < 0) {
perror("error");
exit(1);
}
sub_tmp->offset = tot_size;
/* First tmp file is 4MB */
/* Each is 4k bigger than last */
sub_tmp->length = (i + 1024) * sizeof(buf);
remaining = sub_tmp->length;
/* Each sub tmpfile written with 'A', 'B', etc. */
memset(buf, 'A' + i, sizeof(buf));
while (remaining) {
int written;
written = write(sub_tmp->fd, buf, sizeof(buf));
assert(written == sizeof(buf));
tot_size += sizeof(buf);
remaining -= written;
}
}
printf("total file size %d\n", tot_size);
dest_fd = open(dest_file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (dest_fd == -1) {
perror("error");
exit(1);
}
// make dest file big
ret = posix_fallocate(dest_fd, 0, tot_size);
if (ret) {
perror("error");
exit(1);
}
// release everything in dest file
ioctl_args.offset = 0;
ioctl_args.length = tot_size;
ioctl_args.data_version = 0;
ret = ioctl(dest_fd, SCOUTFS_IOC_RELEASE, &ioctl_args);
if (ret < 0) {
perror("error");
exit(1);
}
// move contents into dest in reverse order
for (i = array_size(sub_tmps) - 1; i >= 0 ; i--) {
struct sub_tmp_info *sub_tmp = &sub_tmps[i];
mb.from_fd = sub_tmp->fd;
mb.from_off = 0;
mb.len = sub_tmp->length;
mb.to_off = sub_tmp->offset;
mb.data_version = 0;
mb.flags = SCOUTFS_IOC_MB_STAGE;
ret = ioctl(dest_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
if (ret < 0) {
perror("error");
exit(1);
}
}
return 0;
}

View File

@@ -1,32 +0,0 @@
#
# test racing fh_to_dentry with evict from lock invalidation. We've
# had deadlocks between the ordering of iget and evict when they acquire
# cluster locks.
#
t_require_commands touch stat handle_cat
t_require_mounts 2
CPUS=$(getconf _NPROCESSORS_ONLN)
NR=$((CPUS * 4))
END=$((SECONDS + 30))
touch "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
while test $SECONDS -lt $END; do
for i in $(seq 1 $NR); do
fs=$((RANDOM % T_NR_MOUNTS))
eval dir="\$T_D${fs}"
write=$((RANDOM & 1))
if [ "$write" == 1 ]; then
touch "$dir/file" &
else
handle_cat "$dir" "$ino" &
fi
done
wait
done
t_pass

View File

@@ -1,98 +0,0 @@
#
# test deleting an inode once all its links and references are gone.
#
t_require_commands cat scoutfs
t_require_mounts 2
FILE="$T_D0/file"
check_ino_index() {
local ino="$1"
local dseq="$2"
local mnt="$3"
t_sync_seq_index
scoutfs walk-inodes -p "$mnt" -- data_seq $dseq $(($dseq + 1)) |
awk 'BEGIN { not = "not " }
($4 == '$ino') { not = ""; exit; }
END { print "ino " not "found in dseq index" }'
}
echo "== basic unlink deletes"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
check_ino_index "$ino" "$dseq" "$T_M0"
rm -f "$FILE"
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== local open-unlink waits for close to delete"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
exec {FD}<"$FILE" # open unused fd, assign to FD
rm -f "$FILE"
echo "contents after rm: $(cat <&$FD)"
check_ino_index "$ino" "$dseq" "$T_M0"
exec {FD}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== multiple local opens are protected"
echo "contents" > "$FILE"
ino=$(stat -c "%i" "$FILE")
dseq=$(scoutfs stat -s data_seq "$FILE")
exec {FD1}<"$FILE"
exec {FD2}<"$FILE"
rm -f "$FILE"
echo "contents after rm 1: $(cat <&$FD1)"
echo "contents after rm 2: $(cat <&$FD2)"
check_ino_index "$ino" "$dseq" "$T_M0"
exec {FD1}>&- # close
exec {FD2}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
echo "== remote unopened unlink deletes"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
rm -f "$T_D1/file"
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
echo "== unlink wait for open on other mount"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
exec {FD}<"$T_D0/file"
rm -f "$T_D1/file"
echo "mount 0 contents after mount 1 rm: $(cat <&$FD)"
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
exec {FD}>&- # close
# we know that revalidating will unhash the remote dentry
stat "$T_D0/file" 2>&1 | t_filter_fs
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
echo "== lots of deletions use one open map"
mkdir "$T_D0/dir"
touch "$T_D0/dir"/files-{1..5}
rm -f "$T_D0/dir"/files-*
rmdir "$T_D0/dir"
echo "== open files survive remote scanning orphans"
echo "contents" > "$T_D0/file"
ino=$(stat -c "%i" "$T_D0/file")
dseq=$(scoutfs stat -s data_seq "$T_D0/file")
exec {FD}<"$T_D0/file"
rm -f "$T_D0/file"
t_umount 1
t_mount 1
echo "mount 0 contents after mount 1 remounted: $(cat <&$FD)"
exec {FD}>&- # close
check_ino_index "$ino" "$dseq" "$T_M0"
check_ino_index "$ino" "$dseq" "$T_M1"
t_pass

View File

@@ -1,15 +0,0 @@
#
# Run tmpfile_stage and check the output with hexdump.
#
t_require_commands stage_tmpfile hexdump
DEST_FILE="$T_D0/dest_file"
stage_tmpfile $T_D0 $DEST_FILE
hexdump -C "$DEST_FILE"
rm -fr "$DEST_FILE"
t_pass

View File

@@ -32,7 +32,7 @@ struct move_blocks_args {
static int do_move_blocks(struct move_blocks_args *args)
{
struct scoutfs_ioctl_move_blocks mb = {0};
struct scoutfs_ioctl_move_blocks mb;
int from_fd = -1;
int to_fd = -1;
int ret;

View File

@@ -339,6 +339,14 @@ static int print_srch_root_item(struct scoutfs_key *key, void *val,
return 0;
}
static int print_lock_clients_entry(struct scoutfs_key *key, void *val,
unsigned val_len, void *arg)
{
printf(" rid %016llx\n", le64_to_cpu(key->sklc_rid));
return 0;
}
static int print_trans_seqs_entry(struct scoutfs_key *key, void *val,
unsigned val_len, void *arg)
{
@@ -868,6 +876,7 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
" server_meta_avail[1]: "AL_HEAD_F"\n"
" server_meta_freed[0]: "AL_HEAD_F"\n"
" server_meta_freed[1]: "AL_HEAD_F"\n"
" lock_clients root: height %u blkno %llu seq %llu\n"
" mounted_clients root: height %u blkno %llu seq %llu\n"
" srch_root root: height %u blkno %llu seq %llu\n"
" trans_seqs root: height %u blkno %llu seq %llu\n"
@@ -887,6 +896,9 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
AL_HEAD_A(&super->server_meta_avail[1]),
AL_HEAD_A(&super->server_meta_freed[0]),
AL_HEAD_A(&super->server_meta_freed[1]),
super->lock_clients.height,
le64_to_cpu(super->lock_clients.ref.blkno),
le64_to_cpu(super->lock_clients.ref.seq),
super->mounted_clients.height,
le64_to_cpu(super->mounted_clients.ref.blkno),
le64_to_cpu(super->mounted_clients.ref.seq),
@@ -935,6 +947,11 @@ static int print_volume(int fd)
ret = print_quorum_blocks(fd, super);
err = print_btree(fd, super, "lock_clients", &super->lock_clients,
print_lock_clients_entry, NULL);
if (err && !ret)
ret = err;
err = print_btree(fd, super, "mounted_clients", &super->mounted_clients,
print_mounted_client_entry, NULL);
if (err && !ret)