mirror of https://github.com/versity/scoutfs.git
synced 2026-01-11 06:00:19 +00:00

Compare commits: v1.5...zab/odirec (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 1029d5a0fe |  |
|  | 9fc759ce47 |  |
|  | bb11617fe3 |  |
|  | 20370c6573 |  |
@@ -2,64 +2,9 @@ Versity ScoutFS Release Notes
 =============================
 
 ---
-v1.5
+v1.3-rc
 \
-*Jun 21, 2022*
-
-* **Fix persistent error during server startup**
-  \
-  Fixed a case where the server would always hit a persistent error on
-  startup, preventing the system from mounting. This required a rare
-  but valid state across the clients.
-
-* **Fix a client hang that would lead to fencing**
-  \
-  The client module's use of in-kernel networking was missing annotation
-  that could lead to communication hanging. The server would fence the
-  client when it stopped communicating. This could be identified by the
-  server fencing a client after it disconnected with no attempt by the
-  client to reconnect.
-
----
-v1.4
-\
-*May 6, 2022*
-
-* **Fix possible client crash during server failover**
-  \
-  Fixed a narrow window during server failover and lock recovery that
-  could cause a client mount to believe that it had an inconsistent item
-  cache and panic. This required very specific lock state and messaging
-  patterns between multiple mounts and multiple servers which made it
-  unlikely to occur in the field.
-
----
-v1.3
-\
-*Apr 7, 2022*
-
-* **Fix rare server instability under heavy load**
-  \
-  Fixed a case of server instability under heavy load due to concurrent
-  work fully exhausting metadata block allocation pools reserved for a
-  single server transaction. This would cause brief interruption as the
-  server shut down and the next server started up and made progress as
-  pending work was retried.
-
-* **Fix slow fencing preventing server startup**
-  \
-  If a server had to process many fence requests with a slow fencing
-  mechanism it could be interrupted before it finished. The server
-  now makes sure heartbeat messages are sent while it is making progress
-  on fencing requests so that other quorum members don't interrupt the
-  process.
-
-* **Performance improvement in getxattr and setxattr**
-  \
-  Kernel allocation patterns in the getxattr and setxattr
-  implementations were causing significant contention between CPUs. Their
-  allocation strategy was changed so that concurrent tasks can call these
-  xattr methods without degrading performance.
+*TBD*
 
 ---
 v1.2
@@ -84,21 +84,6 @@ static u64 smallest_order_length(u64 len)
 	return 1ULL << (free_extent_order(len) * 3);
 }
 
-/*
- * An extent modification dirties three distinct leaves of an allocator
- * btree as it adds and removes the blkno and size sorted items for the
- * old and new lengths of the extent.  Dirtying the paths to these
- * leaves can grow the tree and grow/shrink neighbours at each level.
- * We over-estimate the number of blocks allocated and freed (the paths
- * share a root, growth doesn't free) to err on the simpler and safer
- * side.  The overhead is minimal given the relatively large list blocks
- * and relatively short allocator trees.
- */
-static u32 extent_mod_blocks(u32 height)
-{
-	return ((1 + height) * 2) * 3;
-}
-
 /*
  * Free extents don't have flags and are stored in two indexes sorted by
  * block location and by length order, largest first.  The location key
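For readers following the estimate in the comment above, here is a standalone userspace sketch (not part of the patch, with arbitrary example heights) of how the budget scales: three dirtied leaves, each with a path of `1 + height` blocks that may both allocate and free at every level.

```c
#include <stdio.h>

/* same arithmetic as extent_mod_blocks() above */
static unsigned int extent_mod_blocks(unsigned int height)
{
	return ((1 + height) * 2) * 3;
}

int main(void)
{
	unsigned int h;

	for (h = 1; h <= 4; h++)
		printf("height %u -> budget %u blocks\n",
		       h, extent_mod_blocks(h));
	return 0;
}
```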
@@ -892,14 +877,6 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
  * -ENOENT is returned if we run out of extents in the source tree
  * before moving the total.
  *
- * If meta_reserved is non-zero then -EINPROGRESS can be returned if the
- * current meta allocator's avail blocks or room for freed blocks would
- * have fallen under the reserved amount.  The trees could have been
- * successfully dirtied in this case but the number of blocks moved is
- * not returned.  The caller is expected to deal with the partial
- * progress by committing the dirty trees and examining the resulting
- * modified trees to see if they need to continue moving extents.
- *
  * The caller can specify that extents in the source tree should first
  * be found based on their zone bitmaps.  We'll first try to find
  * extents in the exclusive zones, then vacant zones, and then we'll
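The removed paragraph describes the caller protocol on the v1.5 side of this comparison. A hedged sketch of such a caller, where commit_dirty_trees() is a purely illustrative name and not a function in either tree:

```c
/* illustrative declaration only, not part of the patch */
extern int commit_dirty_trees(struct super_block *sb,
			      struct scoutfs_block_writer *wri);

static int move_until_done(struct super_block *sb, struct scoutfs_alloc *alloc,
			   struct scoutfs_block_writer *wri,
			   struct scoutfs_alloc_root *dst,
			   struct scoutfs_alloc_root *src, u64 total,
			   u64 zone_blocks, u64 meta_reserved)
{
	int ret;

	for (;;) {
		ret = scoutfs_alloc_move(sb, alloc, wri, dst, src, total,
					 NULL, NULL, zone_blocks,
					 meta_reserved);
		if (ret != -EINPROGRESS)
			return ret;

		/* commit partial progress, then examine and continue */
		ret = commit_dirty_trees(sb, wri);
		if (ret < 0)
			return ret;
	}
}
```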
@@ -914,7 +891,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		       struct scoutfs_block_writer *wri,
 		       struct scoutfs_alloc_root *dst,
 		       struct scoutfs_alloc_root *src, u64 total,
-		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved)
+		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks)
 {
 	struct alloc_ext_args args = {
 		.alloc = alloc,
@@ -964,14 +941,6 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		if (ret < 0)
 			break;
 
-		if (meta_reserved != 0 &&
-		    scoutfs_alloc_meta_low(sb, alloc, meta_reserved +
-					   extent_mod_blocks(src->root.height) +
-					   extent_mod_blocks(dst->root.height))) {
-			ret = -EINPROGRESS;
-			break;
-		}
-
 		/* searching set start/len, finish initializing alloced extent */
 		ext.map = found.map ? ext.start - found.start + found.map : 0;
 		ext.flags = found.flags;
@@ -1096,6 +1065,15 @@ out:
 * than completely exhausting the avail list or overflowing the freed
 * list.
 *
+ * An extent modification dirties three distinct leaves of an allocator
+ * btree as it adds and removes the blkno and size sorted items for the
+ * old and new lengths of the extent.  Dirtying the paths to these
+ * leaves can grow the tree and grow/shrink neighbours at each level.
+ * We over-estimate the number of blocks allocated and freed (the paths
+ * share a root, growth doesn't free) to err on the simpler and safer
+ * side.  The overhead is minimal given the relatively large list blocks
+ * and relatively short allocator trees.
+ *
 * The caller tells us how many extents they're about to modify and how
 * many other additional blocks they may cow manually.  And finally, the
 * caller could be the first to dirty the avail and freed blocks in the
@@ -1104,7 +1082,7 @@ out:
 static bool list_has_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
 			    struct scoutfs_alloc_root *root, u32 extents, u32 addl_blocks)
 {
-	u32 tree_blocks = extent_mod_blocks(root->root.height) * extents;
+	u32 tree_blocks = (((1 + root->root.height) * 2) * 3) * extents;
 	u32 most = 1 + tree_blocks + addl_blocks;
 
 	if (le32_to_cpu(alloc->avail.first_nr) < most) {
@@ -1340,17 +1318,6 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
 	return lo;
 }
 
-void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space)
-{
-	unsigned int seq;
-
-	do {
-		seq = read_seqbegin(&alloc->seqlock);
-		*avail_total = le32_to_cpu(alloc->avail.first_nr);
-		*freed_space = list_block_space(alloc->freed.first_nr);
-	} while (read_seqretry(&alloc->seqlock, seq));
-}
-
 bool scoutfs_alloc_test_flag(struct super_block *sb,
 			     struct scoutfs_alloc *alloc, u32 flag)
 {
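The removed helper is an instance of the kernel's seqlock read pattern: retry the snapshot until no writer raced with it. A minimal generic sketch of that pattern, for readers unfamiliar with it (the function and parameter names are illustrative):

```c
#include <linux/seqlock.h>
#include <linux/types.h>

/* retry until a consistent pair of values is read, mirroring the
 * loop in the removed scoutfs_alloc_meta_remaining() */
static void read_pair_snapshot(seqlock_t *lock, u32 *a, u32 *b,
			       const u32 *src_a, const u32 *src_b)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(lock);
		*a = *src_a;
		*b = *src_b;
	} while (read_seqretry(lock, seq));
}
```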
@@ -131,7 +131,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
 		       struct scoutfs_block_writer *wri,
 		       struct scoutfs_alloc_root *dst,
 		       struct scoutfs_alloc_root *src, u64 total,
-		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_reserved);
+		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks);
 int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
 			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
 			 u64 start, u64 len);
@@ -158,7 +158,6 @@ int scoutfs_alloc_splice_list(struct super_block *sb,
 
 bool scoutfs_alloc_meta_low(struct super_block *sb,
 			    struct scoutfs_alloc *alloc, u32 nr);
-void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space);
 bool scoutfs_alloc_test_flag(struct super_block *sb,
 			     struct scoutfs_alloc *alloc, u32 flag);
 
@@ -2449,7 +2449,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 			      struct scoutfs_alloc *alloc,
 			      struct scoutfs_block_writer *wri,
 			      struct scoutfs_key *key,
-			      struct scoutfs_btree_root *root, int free_budget)
+			      struct scoutfs_btree_root *root, int alloc_low)
 {
 	u64 blknos[SCOUTFS_BTREE_MAX_HEIGHT];
 	struct scoutfs_block *bl = NULL;
@@ -2459,15 +2459,11 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 	struct scoutfs_avl_node *node;
 	struct scoutfs_avl_node *next;
 	struct scoutfs_key par_next;
-	int nr_freed = 0;
 	int nr_par;
 	int level;
 	int ret;
 	int i;
 
-	if (WARN_ON_ONCE(free_budget <= 0))
-		return -EINVAL;
-
 	if (WARN_ON_ONCE(root->height > ARRAY_SIZE(blknos)))
 		return -EIO; /* XXX corruption */
 
@@ -2542,7 +2538,8 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 		while (node) {
 
 			/* make sure we can always free parents after leaves */
-			if ((nr_freed + 1 + nr_par) > free_budget) {
+			if (scoutfs_alloc_meta_low(sb, alloc,
+						   alloc_low + nr_par + 1)) {
 				ret = 0;
 				goto out;
 			}
@@ -2556,7 +2553,6 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 					le64_to_cpu(ref.blkno));
 			if (ret < 0)
 				goto out;
-			nr_freed++;
 
 			node = scoutfs_avl_next(&bt->item_root, node);
 			if (node) {
@@ -2572,7 +2568,6 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 					       blknos[i]);
 			ret = scoutfs_free_meta(sb, alloc, wri, blknos[i]);
 			BUG_ON(ret); /* checked meta low, freed should fit */
-			nr_freed++;
 		}
 
 		/* restart walk past the subtree we just freed */
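A sketch of the invariant both forms of this check preserve: before freeing one more leaf there must be headroom to also free the nr_par parent blocks already recorded on the walk, so parents are never stranded. The new form asks the live allocator instead of counting against a caller-supplied budget (the helper name below is illustrative):

```c
/* stop the walk while the allocator can still absorb the pending
 * parents plus the leaf we are about to free */
static bool free_blocks_must_stop(struct super_block *sb,
				  struct scoutfs_alloc *alloc,
				  int alloc_low, int nr_par)
{
	return scoutfs_alloc_meta_low(sb, alloc, alloc_low + nr_par + 1);
}
```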
@@ -125,7 +125,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
 			      struct scoutfs_alloc *alloc,
 			      struct scoutfs_block_writer *wri,
 			      struct scoutfs_key *key,
-			      struct scoutfs_btree_root *root, int free_budget);
+			      struct scoutfs_btree_root *root, int alloc_low);
 
 void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);
 
@@ -157,7 +157,6 @@
 	EXPAND_COUNTER(orphan_scan_error) \
 	EXPAND_COUNTER(orphan_scan_item) \
 	EXPAND_COUNTER(orphan_scan_omap_set) \
-	EXPAND_COUNTER(quorum_candidate_server_stopping) \
 	EXPAND_COUNTER(quorum_elected) \
 	EXPAND_COUNTER(quorum_fence_error) \
 	EXPAND_COUNTER(quorum_fence_leader) \
kmod/src/data.c (297 lines changed)
@@ -21,6 +21,7 @@
 #include <linux/log2.h>
 #include <linux/falloc.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 
 #include "format.h"
 #include "super.h"
@@ -471,6 +472,7 @@ static int alloc_block(struct super_block *sb, struct inode *inode,
 	ext->map = blkno;
 	ext->flags = 0;
 	ret = 0;
+
 out:
 	if (ret < 0 && blkno > 0) {
 		err = scoutfs_free_data(sb, datinf->alloc, datinf->wri,
@@ -488,8 +490,68 @@ out:
 	return ret;
 }
 
+static int alloc_block_dio(struct super_block *sb, struct inode *inode,
+			   struct scoutfs_extent *ext, struct buffer_head *bh,
+			   u64 iblock, struct scoutfs_lock *lock)
+{
+	DECLARE_DATA_INFO(sb, datinf);
+	const u64 ino = scoutfs_ino(inode);
+	struct data_ext_args args = {
+		.ino = ino,
+		.inode = inode,
+		.lock = lock,
+	};
+	u64 blkno = 0;
+	u64 blocks = 0;
+	u64 count = 0;
+	u64 last;
+	u8 ext_fl = 0;
+	int ret = 0;
+	bool first = true;
+	int err;
+
+	last = (bh->b_size - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
+
+	while (blocks < last) {
+		if (ext->len >= last && first)
+			count = min_t(u64, last, SCOUTFS_FALLOCATE_ALLOC_LIMIT);
+		else
+			count = min_t(u64, last - blocks, SCOUTFS_FALLOCATE_ALLOC_LIMIT);
+
+		mutex_lock(&datinf->mutex);
+		ret = scoutfs_alloc_data(sb, datinf->alloc, datinf->wri,
+					 &datinf->dalloc, count, &blkno, &count);
+		if (ret == 0) {
+			ret = scoutfs_ext_set(sb, &data_ext_ops, &args, iblock,
+					      count, blkno,
+					      ext_fl | SEF_UNWRITTEN);
+			if (ret < 0) {
+				err = scoutfs_free_data(sb, datinf->alloc,
+							datinf->wri,
+							&datinf->data_freed,
+							blkno, count);
+				BUG_ON(err); /* inconsistent */
+			}
+		}
+
+		mutex_unlock(&datinf->mutex);
+
+		if (ret < 0)
+			break;
+
+		blocks += count;
+		first = false;
+		ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
+				       iblock, 1, ext);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
 static int scoutfs_get_block(struct inode *inode, sector_t iblock,
-			     struct buffer_head *bh, int create)
+			     struct buffer_head *bh, int create, bool dio_flag)
 {
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	const u64 ino = scoutfs_ino(inode);
@@ -530,7 +592,7 @@ static int scoutfs_get_block(struct inode *inode, sector_t iblock,
 	}
 
 	/* convert unwritten to written, could be staging */
-	if (create && ext.map && (ext.flags & SEF_UNWRITTEN)) {
+	if (create && ext.map && !dio_flag && (ext.flags & SEF_UNWRITTEN)) {
 		un.start = iblock;
 		un.len = 1;
 		un.map = ext.map + (iblock - ext.start);
@@ -542,11 +604,26 @@ static int scoutfs_get_block(struct inode *inode, sector_t iblock,
 			set_buffer_new(bh);
 		}
 		goto out;
+	} else if (create && ext.map && dio_flag) {
+		un.start = iblock;
+		un.len = 1;
+		un.map = ext.map + (iblock - ext.start);
+		un.flags = ext.flags;
+		ret = scoutfs_ext_set(sb, &data_ext_ops, &args,
+				      un.start, un.len, un.map, un.flags);
+		if (ret == 0) {
+			ext = un;
+			set_buffer_new(bh);
+		}
+		goto out;
 	}
 
 	/* allocate and map blocks containing our logical block */
 	if (create && !ext.map) {
-		ret = alloc_block(sb, inode, &ext, iblock, lock);
+		if (dio_flag)
+			ret = alloc_block_dio(sb, inode, &ext, bh, iblock, lock);
+		else
+			ret = alloc_block(sb, inode, &ext, iblock, lock);
 		if (ret == 0)
 			set_buffer_new(bh);
 	} else {
@@ -580,25 +657,75 @@ static int scoutfs_get_block_read(struct inode *inode, sector_t iblock,
 	int ret;
 
 	down_read(&si->extent_sem);
-	ret = scoutfs_get_block(inode, iblock, bh, create);
+	ret = scoutfs_get_block(inode, iblock, bh, create, false);
 	up_read(&si->extent_sem);
 
 	return ret;
 }
 
-static int scoutfs_get_block_write(struct inode *inode, sector_t iblock,
-				   struct buffer_head *bh, int create)
+int scoutfs_get_block_write(struct inode *inode, sector_t iblock,
+			    struct buffer_head *bh, int create)
 {
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	int ret;
 
 	down_write(&si->extent_sem);
-	ret = scoutfs_get_block(inode, iblock, bh, create);
+	ret = scoutfs_get_block(inode, iblock, bh, create, false);
 	up_write(&si->extent_sem);
 
 	return ret;
 }
 
+static int scoutfs_get_block_write_dio(struct inode *inode, sector_t iblock,
+				       struct buffer_head *bh, int create)
+{
+	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct scoutfs_lock *lock = NULL;
+	LIST_HEAD(ind_locks);
+	int ret;
+
+	lock = scoutfs_per_task_get(&si->pt_data_lock);
+	if (WARN_ON_ONCE(!lock)) {
+		return -EINVAL;
+	}
+
+	if (inode)
+		ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
+						    true, false);
+	else
+		ret = scoutfs_hold_trans(sb, false);
+	if (ret)
+		goto out;
+
+	if (inode)
+		ret = scoutfs_dirty_inode_item(inode, lock);
+
+	if (ret < 0)
+		goto out_unlock;
+
+	down_write(&si->extent_sem);
+	ret = scoutfs_get_block(inode, iblock, bh, create, true);
+	up_write(&si->extent_sem);
+
+	if (inode) {
+		scoutfs_inode_set_data_seq(inode);
+		scoutfs_inode_inc_data_version(inode);
+		inode_inc_iversion(inode);
+
+		if (ret > 0)
+			i_size_write(inode, ret);
+		scoutfs_update_inode_item(inode, lock, &ind_locks);
+	}
+
+out_unlock:
+	scoutfs_release_trans(sb);
+	scoutfs_inode_index_unlock(sb, &ind_locks);
+out:
+	return ret;
+}
+
 /*
  * This is almost never used.  We can't block on a cluster lock while
  * holding the page lock because lock invalidation gets the page lock
@@ -861,6 +988,154 @@ static int scoutfs_write_end(struct file *file, struct address_space *mapping,
 	return ret;
 }
 
+static s64 convert_unwritten_items(struct super_block *sb, struct inode *inode,
+				   u64 ino, u64 iblock, u64 last,
+				   struct scoutfs_lock *lock)
+{
+	struct data_ext_args args = {
+		.ino = ino,
+		.inode = inode,
+		.lock = lock,
+	};
+	struct scoutfs_extent ext;
+	struct scoutfs_extent un;
+	u64 offset;
+	s64 ret;
+	int i;
+
+	ret = 0;
+
+	for (i = 0; iblock <= last; i++) {
+		if (i == EXTENTS_PER_HOLD) {
+			ret = iblock;
+			break;
+		}
+
+		ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
+				       iblock, 1, &ext);
+		if (ret < 0) {
+			if (ret == -ENOENT)
+				ret = 0;
+			break;
+		}
+
+		/* done if we went past the region */
+		if (ext.start > last) {
+			ret = 0;
+			break;
+		}
+
+		/* nothing to do when already marked written */
+		if (!(ext.flags & SEF_UNWRITTEN)) {
+			iblock = ext.start + ext.len;
+			continue;
+		}
+
+		iblock = max(ext.start, iblock);
+		offset = iblock - ext.start;
+
+		un.start = iblock;
+		un.map = ext.map ? ext.map + offset : 0;
+		un.len = min(ext.len - offset, last - iblock + 1);
+		un.flags = ext.flags & ~(SEF_OFFLINE|SEF_UNWRITTEN);
+
+		ret = scoutfs_ext_set(sb, &data_ext_ops, &args,
+				      un.start, un.len, un.map, un.flags);
+		if (ret < 0)
+			break;
+
+		iblock += un.len;
+	}
+
+	return ret;
+}
+
+static ssize_t
+convert_unwritten_extent(struct inode *inode, loff_t offset, ssize_t count)
+{
+	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	const u64 ino = scoutfs_ino(inode);
+	struct scoutfs_lock *lock = NULL;
+	LIST_HEAD(ind_locks);
+	u64 iblock;
+	u64 last;
+	ssize_t ret = 0;
+
+	lock = scoutfs_per_task_get(&si->pt_data_lock);
+	if (WARN_ON_ONCE(!lock)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	iblock = offset >> SCOUTFS_BLOCK_SM_SHIFT;
+	last = (offset + count - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
+	while (iblock <= last) {
+		if (inode)
+			ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
+							    true, false);
+		else
+			ret = scoutfs_hold_trans(sb, false);
+		if (ret)
+			break;
+
+		if (inode)
+			ret = scoutfs_dirty_inode_item(inode, lock);
+		else
+			ret = 0;
+
+		if (ret == 0) {
+			down_write(&si->extent_sem);
+			ret = convert_unwritten_items(sb, inode, ino, iblock,
+						      last, lock);
+			up_write(&si->extent_sem);
+		}
+
+		if (ret < 0)
+			goto out;
+
+		if (inode) {
+			scoutfs_inode_set_data_seq(inode);
+			scoutfs_inode_inc_data_version(inode);
+			inode_inc_iversion(inode);
+
+			if (ret > 0)
+				i_size_write(inode, ret);
+			scoutfs_update_inode_item(inode, lock, &ind_locks);
+		}
+		scoutfs_release_trans(sb);
+		if (inode)
+			scoutfs_inode_index_unlock(sb, &ind_locks);
+
+		if (ret <= 0)
+			break;
+
+		iblock = ret;
+	}
+
+out:
+	return ret;
+}
+
+static ssize_t
+scoutfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+		  loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t ret;
+
+	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+				 scoutfs_get_block_write_dio);
+	if (ret > 0 && (rw & WRITE)) {
+		ret = convert_unwritten_extent(inode, offset, ret);
+	}
+
+	return ret;
+}
+
 /*
  * Try to allocate unwritten extents for any unallocated regions of the
  * logical block extent from the caller.  The caller manages locks and
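With .direct_IO wired up above, an O_DIRECT write lands in unwritten extents that convert_unwritten_extent() marks written once the transfer finishes. A hypothetical userspace sketch of such a write; the mount path and the 4 KiB alignment are assumptions, not values taken from the patch:

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t sz = 4096;		/* assumed block alignment */
	void *buf;
	int fd;

	if (posix_memalign(&buf, sz, sz))
		return 1;
	memset(buf, 'x', sz);

	fd = open("/mnt/scoutfs/file", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0)
		return 1;

	/* goes through the new dio get_block path, then the written
	 * conversion after the transfer completes */
	if (pwrite(fd, buf, sz, 0) != (ssize_t)sz)
		return 1;

	close(fd);
	free(buf);
	return 0;
}
```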
@@ -1473,13 +1748,14 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	iblock = start >> SCOUTFS_BLOCK_SM_SHIFT;
 	last = (start + len - 1) >> SCOUTFS_BLOCK_SM_SHIFT;
 
-	while (iblock <= last) {
+	while (true) {
 		ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
 				       iblock, 1, &ext);
 		if (ret < 0) {
-			if (ret == -ENOENT)
+			if (ret == -ENOENT) {
 				ret = 0;
-				last_flags = FIEMAP_EXTENT_LAST;
+				last_flags = FIEMAP_EXTENT_LAST;
+			}
 			break;
 		}
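The brace fix above confines FIEMAP_EXTENT_LAST to the -ENOENT case, so only the final extent reported to userspace carries it. A hypothetical sketch of a FIEMAP consumer that observes these flags (error handling trimmed):

```c
#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	fm = calloc(1, sizeof(*fm) + 16 * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;
	fm->fm_extent_count = 16;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu len %llu flags 0x%x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);

	free(fm);
	close(fd);
	return 0;
}
```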
@@ -1804,6 +2080,7 @@ const struct address_space_operations scoutfs_file_aops = {
 	.writepages = scoutfs_writepages,
 	.write_begin = scoutfs_write_begin,
 	.write_end = scoutfs_write_end,
+	.direct_IO = scoutfs_direct_IO,
 };
 
 const struct file_operations scoutfs_file_fops = {
@@ -49,6 +49,8 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
 int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			u64 start, u64 len);
 long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
+int scoutfs_get_block_write(struct inode *inode, sector_t iblock,
+			    struct buffer_head *bh, int create);
 int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
 				     struct scoutfs_lock *lock);
 int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
@@ -47,6 +47,9 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	DECLARE_DATA_WAIT(dw);
 	int ret;
 
+	if (!is_sync_kiocb(iocb))
+		return -EINVAL;
+
 retry:
 	/* protect checked extents from release */
 	mutex_lock(&inode->i_mutex);
@@ -97,6 +100,9 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	DECLARE_DATA_WAIT(dw);
 	int ret;
 
+	if (!is_sync_kiocb(iocb))
+		return -EINVAL;
+
 	if (iocb->ki_left == 0) /* Does this even happen? */
 		return 0;
 
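A hedged illustration of what these guards refuse: a kernel AIO submission through libaio would now fail with EINVAL rather than run an unsupported asynchronous path. The file path below is illustrative, and the error may surface either from io_submit() or as a negative completion result depending on the kernel:

```c
/* build with: cc demo.c -laio */
#define _GNU_SOURCE
#include <libaio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	io_context_t ctx = 0;
	char buf[4096] __attribute__((aligned(4096)));
	int fd, ret;

	fd = open("/mnt/scoutfs/file", O_RDONLY | O_DIRECT);
	if (fd < 0 || io_setup(1, &ctx) < 0)
		return 1;

	io_prep_pread(&cb, fd, buf, sizeof(buf), 0);
	ret = io_submit(ctx, 1, cbs);
	if (ret < 0)
		printf("io_submit: %d\n", ret);		/* may be -EINVAL */
	else if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("res: %ld\n", (long)ev.res);	/* or -EINVAL here */

	io_destroy(ctx);
	close(fd);
	return 0;
}
```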
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/list_sort.h>
+#include <linux/buffer_head.h>
 
 #include "format.h"
 #include "super.h"
@@ -354,15 +355,22 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
 {
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	struct super_block *sb = inode->i_sb;
+	SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
 	LIST_HEAD(ind_locks);
 	int ret;
 
 	if (!S_ISREG(inode->i_mode))
 		return 0;
 
+	scoutfs_per_task_add(&si->pt_data_lock, &pt_ent, lock);
+	ret = block_truncate_page(inode->i_mapping, new_size, scoutfs_get_block_write);
+	scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
+	if (ret)
+		goto out;
+
 	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true, false);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (new_size != i_size_read(inode))
 		scoutfs_inode_inc_data_version(inode);
@@ -378,6 +386,7 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
 	scoutfs_release_trans(sb);
 	scoutfs_inode_index_unlock(sb, &ind_locks);
 
+out:
 	return ret;
 }
 
@@ -289,7 +289,6 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
 	lock->sb = sb;
 	init_waitqueue_head(&lock->waitq);
 	lock->mode = SCOUTFS_LOCK_NULL;
-	lock->invalidating_mode = SCOUTFS_LOCK_NULL;
 
 	atomic64_set(&lock->forest_bloom_nr, 0);
 
@@ -667,9 +666,7 @@ struct inv_req {
 *
 * Before we start invalidating the lock we set the lock to the new
 * mode, preventing further incompatible users of the old mode from
- * using the lock while we're invalidating.  We record the previously
- * granted mode so that we can send lock recover responses with the old
- * granted mode during invalidation.
+ * using the lock while we're invalidating.
 */
 static void lock_invalidate_worker(struct work_struct *work)
 {
@@ -694,8 +691,7 @@ static void lock_invalidate_worker(struct work_struct *work)
 		if (!lock_counts_match(nl->new_mode, lock->users))
 			continue;
 
-		/* set the new mode, no incompatible users during inval, recov needs old */
-		lock->invalidating_mode = lock->mode;
+		/* set the new mode, no incompatible users during inval */
 		lock->mode = nl->new_mode;
 
 		/* move everyone that's ready to our private list */
@@ -738,8 +734,6 @@ static void lock_invalidate_worker(struct work_struct *work)
 		list_del(&ireq->head);
 		kfree(ireq);
 
-		lock->invalidating_mode = SCOUTFS_LOCK_NULL;
-
 		if (list_empty(&lock->inv_list)) {
 			/* finish if another request didn't arrive */
 			list_del_init(&lock->inv_head);
@@ -830,7 +824,6 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
 {
 	DECLARE_LOCK_INFO(sb, linfo);
 	struct scoutfs_net_lock_recover *nlr;
-	enum scoutfs_lock_mode mode;
 	struct scoutfs_lock *lock;
 	struct scoutfs_lock *next;
 	struct rb_node *node;
@@ -851,15 +844,10 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
 
 	for (i = 0; lock && i < SCOUTFS_NET_LOCK_MAX_RECOVER_NR; i++) {
 
-		if (lock->invalidating_mode != SCOUTFS_LOCK_NULL)
-			mode = lock->invalidating_mode;
-		else
-			mode = lock->mode;
-
 		nlr->locks[i].key = lock->start;
 		nlr->locks[i].write_seq = cpu_to_le64(lock->write_seq);
-		nlr->locks[i].old_mode = mode;
-		nlr->locks[i].new_mode = mode;
+		nlr->locks[i].old_mode = lock->mode;
+		nlr->locks[i].new_mode = lock->mode;
 
 		node = rb_next(&lock->node);
 		if (node)
@@ -39,7 +39,6 @@ struct scoutfs_lock {
 	struct list_head cov_list;
 
 	enum scoutfs_lock_mode mode;
-	enum scoutfs_lock_mode invalidating_mode;
 	unsigned int waiters[SCOUTFS_LOCK_NR_MODES];
 	unsigned int users[SCOUTFS_LOCK_NR_MODES];
 
@@ -749,7 +749,7 @@ out:
 	if (ret < 0) {
 		scoutfs_err(sb, "lock server err %d during client rid %016llx farewell, shutting down",
 			    ret, rid);
-		scoutfs_server_stop(sb);
+		scoutfs_server_abort(sb);
 	}
 
 	return ret;
@@ -991,8 +991,6 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
 		if (ret < 0)
 			break;
 
-		acc_sock->sk->sk_allocation = GFP_NOFS;
-
 		/* inherit accepted request funcs from listening conn */
 		acc_conn = scoutfs_net_alloc_conn(sb, conn->notify_up,
 						  conn->notify_down,
@@ -1055,8 +1053,6 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
 	if (ret)
 		goto out;
 
-	sock->sk->sk_allocation = GFP_NOFS;
-
 	/* caller specified connect timeout */
 	tv.tv_sec = conn->connect_timeout_ms / MSEC_PER_SEC;
 	tv.tv_usec = (conn->connect_timeout_ms % MSEC_PER_SEC) * USEC_PER_MSEC;
@@ -1296,7 +1292,7 @@ restart:
 		if (ret) {
 			scoutfs_err(sb, "client fence returned err %d, shutting down server",
 				    ret);
-			scoutfs_server_stop(sb);
+			scoutfs_server_abort(sb);
 		}
 	}
 	destroy_conn(acc);
@@ -1454,8 +1450,6 @@ int scoutfs_net_bind(struct super_block *sb,
 	if (ret)
 		goto out;
 
-	sock->sk->sk_allocation = GFP_NOFS;
-
 	optval = 1;
 	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
 				(char *)&optval, sizeof(optval));
@@ -105,8 +105,6 @@ enum quorum_role { FOLLOWER, CANDIDATE, LEADER };
 struct quorum_status {
 	enum quorum_role role;
 	u64 term;
-	u64 server_start_term;
-	int server_event;
 	int vote_for;
 	unsigned long vote_bits;
 	ktime_t timeout;
@@ -119,6 +117,7 @@ struct quorum_info {
 	bool shutdown;
 
 	int our_quorum_slot_nr;
+	unsigned long flags;
 	int votes_needed;
 
 	spinlock_t show_lock;
@@ -129,6 +128,8 @@ struct quorum_info {
 	struct scoutfs_sysfs_attrs ssa;
 };
 
+#define QINF_FLAG_SERVER 0
+
 #define DECLARE_QUORUM_INFO(sb, name) \
 	struct quorum_info *name = SCOUTFS_SB(sb)->quorum_info
 #define DECLARE_QUORUM_INFO_KOBJ(kobj, name) \
@@ -493,6 +494,16 @@ static int update_quorum_block(struct super_block *sb, int event, u64 term, bool
 	return ret;
 }
 
+/*
+ * The calling server has fenced previous leaders and reclaimed their
+ * resources.  We can now update our fence event with a greater term to
+ * stop future leaders from doing the same.
+ */
+int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term)
+{
+	return update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_FENCE, term, true);
+}
+
 /*
 * The calling server has been elected and has started running but can't
 * yet assume that it has exclusive access to the metadata device.  We
@@ -582,9 +593,15 @@ int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term)
 	}
 
 out:
-	err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
-	if (ret == 0)
-		ret = err;
+	if (fence_started) {
+		err = scoutfs_fence_wait_fenced(sb, msecs_to_jiffies(SCOUTFS_QUORUM_FENCE_TO_MS));
+		if (ret == 0)
+			ret = err;
+	} else {
+		err = scoutfs_quorum_fence_complete(sb, term);
+		if (ret == 0)
+			ret = err;
+	}
 
 	if (ret < 0)
 		scoutfs_inc_counter(sb, quorum_fence_error);
@@ -610,8 +627,9 @@ static void update_show_status(struct quorum_info *qinf, struct quorum_status *q
 /*
 * The quorum work always runs in the background of quorum member
 * mounts.  It's responsible for starting and stopping the server if
- * it's elected leader.  While it's leader it sends heartbeats to
- * suppress other quorum work from standing for election.
+ * it's elected leader, and the server can call back into it to let it
+ * know that it has shut itself down (perhaps due to error) so that the
+ * work should stop sending heartbeats.
 */
 static void scoutfs_quorum_worker(struct work_struct *work)
 {
@@ -619,7 +637,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 	struct super_block *sb = qinf->sb;
 	struct sockaddr_in unused;
 	struct quorum_host_msg msg;
-	struct quorum_status qst = {0,};
+	struct quorum_status qst;
 	int ret;
 	int err;
 
@@ -628,7 +646,9 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 
 	/* start out as a follower */
 	qst.role = FOLLOWER;
+	qst.term = 0;
 	qst.vote_for = -1;
+	qst.vote_bits = 0;
 
 	/* read our starting term from greatest in all events in all slots */
 	read_greatest_term(sb, &qst.term);
@@ -664,6 +684,20 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 		    msg.term < qst.term)
 			msg.type = SCOUTFS_QUORUM_MSG_INVALID;
 
+		/* if the server has shutdown we become follower */
+		if (!test_bit(QINF_FLAG_SERVER, &qinf->flags) &&
+		    qst.role == LEADER) {
+			qst.role = FOLLOWER;
+			qst.vote_for = -1;
+			qst.vote_bits = 0;
+			qst.timeout = election_timeout();
+			scoutfs_inc_counter(sb, quorum_server_shutdown);
+
+			send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
+					qst.term);
+			scoutfs_inc_counter(sb, quorum_send_resignation);
+		}
+
 		trace_scoutfs_quorum_loop(sb, qst.role, qst.term, qst.vote_for,
 					  qst.vote_bits,
 					  ktime_to_timespec64(qst.timeout));
@@ -674,6 +708,8 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 			if (qst.role == LEADER) {
 				scoutfs_warn(sb, "saw msg type %u from %u for term %llu while leader in term %llu, shutting down server.",
 					     msg.type, msg.from, msg.term, qst.term);
+				update_show_status(qinf, &qst);
+				scoutfs_server_stop(sb);
 			}
 			qst.role = FOLLOWER;
 			qst.term = msg.term;
@@ -695,13 +731,6 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 		/* followers and candidates start new election on timeout */
 		if (qst.role != LEADER &&
 		    ktime_after(ktime_get(), qst.timeout)) {
-			/* .. but only if their server has stopped */
-			if (!scoutfs_server_is_down(sb)) {
-				qst.timeout = election_timeout();
-				scoutfs_inc_counter(sb, quorum_candidate_server_stopping);
-				continue;
-			}
-
 			qst.role = CANDIDATE;
 			qst.term++;
 			qst.vote_for = -1;
@@ -750,62 +779,24 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 			if (ret < 0)
 				goto out;
 
-			qst.server_start_term = qst.term;
-			qst.server_event = SCOUTFS_QUORUM_EVENT_ELECT;
-			scoutfs_server_start(sb, qst.term);
-		}
-
-		/*
-		 * This leader's server is up, having finished fencing
-		 * previous leaders.  We update the fence event with the
-		 * current term to let future leaders know that previous
-		 * servers have been fenced.
-		 */
-		if (qst.role == LEADER && qst.server_event != SCOUTFS_QUORUM_EVENT_FENCE &&
-		    scoutfs_server_is_up(sb)) {
-			ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_FENCE, qst.term, true);
-			if (ret < 0)
-				goto out;
-			qst.server_event = SCOUTFS_QUORUM_EVENT_FENCE;
-		}
-
-		/*
-		 * Stop a running server if we're no longer leader in
-		 * its term.
-		 */
-		if (!(qst.role == LEADER && qst.term == qst.server_start_term) &&
-		    scoutfs_server_is_running(sb)) {
-			/* make very sure server is fully shut down */
-			scoutfs_server_stop(sb);
-		}
-
-		/*
-		 * A previously running server has stopped.  The quorum
-		 * protocol might have shut it down by changing roles or
-		 * it might have stopped on its own, perhaps on errors.
-		 * If we're still a leader then we become a follower and
-		 * send resignations to encourage the next election.
-		 * Always update the _STOP event to stop connections and
-		 * fencing.
-		 */
-		if (qst.server_start_term > 0 && scoutfs_server_is_down(sb)) {
-			if (qst.role == LEADER) {
-				qst.role = FOLLOWER;
-				qst.vote_for = -1;
-				qst.vote_bits = 0;
-				qst.timeout = election_timeout();
-				scoutfs_inc_counter(sb, quorum_server_shutdown);
-
-				send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
-						qst.server_start_term);
-				scoutfs_inc_counter(sb, quorum_send_resignation);
-			}
-
-			ret = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP,
-						  qst.server_start_term, true);
-			if (ret < 0)
-				goto out;
-
-			qst.server_start_term = 0;
-		}
+			/* set server bit before server shutdown could clear */
+			set_bit(QINF_FLAG_SERVER, &qinf->flags);
+
+			ret = scoutfs_server_start(sb, qst.term);
+			if (ret < 0) {
+				clear_bit(QINF_FLAG_SERVER, &qinf->flags);
+				/* store our increased term */
+				err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, qst.term,
+							  true);
+				if (err < 0) {
+					ret = err;
+					goto out;
+				}
+				ret = 0;
+				continue;
+			}
+		}
 
 		/* leaders regularly send heartbeats to delay elections */
@@ -845,16 +836,11 @@ static void scoutfs_quorum_worker(struct work_struct *work)
 	update_show_status(qinf, &qst);
 
 	/* always try to stop a running server as we stop */
-	if (scoutfs_server_is_running(sb)) {
-		scoutfs_server_stop_wait(sb);
-		send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION, qst.term);
-
-		if (qst.server_start_term > 0) {
-			err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP,
-						  qst.server_start_term, true);
-			if (err < 0 && ret == 0)
-				ret = err;
-		}
+	if (test_bit(QINF_FLAG_SERVER, &qinf->flags)) {
+		scoutfs_server_stop(sb);
+		scoutfs_fence_stop(sb);
+		send_msg_others(sb, SCOUTFS_QUORUM_MSG_RESIGNATION,
+				qst.term);
 	}
 
 	/* record that this slot no longer has an active quorum */
@@ -866,6 +852,21 @@ out:
 	}
 }
 
+/*
+ * The calling server has shutdown and is no longer using shared
+ * resources.  Clear the bit so that we stop sending heartbeats and
+ * allow the next server to be elected.  Update the stop event so that
+ * it won't be considered available by clients or fenced by the next
+ * leader.
+ */
+void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term)
+{
+	DECLARE_QUORUM_INFO(sb, qinf);
+
+	clear_bit(QINF_FLAG_SERVER, &qinf->flags);
+	update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_STOP, term, true);
+}
+
 /*
 * Clients read quorum blocks looking for the leader with a server whose
 * address it can try and connect to.
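Taken together, the QINF_FLAG_SERVER changes form a small ownership handshake between the quorum worker and the server. A condensed sketch using the names from this patch (the wrapper functions themselves are illustrative):

```c
/* worker side, on winning an election: mark the server as ours
 * before it can possibly shut down and clear the mark */
static int worker_starts_server(struct super_block *sb,
				struct quorum_info *qinf, u64 term)
{
	set_bit(QINF_FLAG_SERVER, &qinf->flags);
	return scoutfs_server_start(sb, term);
}

/* server side, once it stops using shared resources: clearing the
 * bit is what the worker observes to resign leadership */
static void server_announces_shutdown(struct super_block *sb, u64 term)
{
	scoutfs_quorum_server_shutdown(sb, term);
}
```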
@@ -987,8 +988,6 @@ static ssize_t status_show(struct kobject *kobj, struct kobj_attribute *attr,
 			     qinf->our_quorum_slot_nr);
 	snprintf_ret(buf, size, &ret, "term %llu\n",
 		     qst.term);
-	snprintf_ret(buf, size, &ret, "server_start_term %llu\n", qst.server_start_term);
-	snprintf_ret(buf, size, &ret, "server_event %d\n", qst.server_event);
 	snprintf_ret(buf, size, &ret, "role %d (%s)\n",
 		     qst.role, role_str(qst.role));
 	snprintf_ret(buf, size, &ret, "vote_for %d\n",
@@ -2,12 +2,14 @@
 #define _SCOUTFS_QUORUM_H_
 
 int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
+void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term);
 
 u8 scoutfs_quorum_votes_needed(struct super_block *sb);
 void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
 			     struct sockaddr_in *sin);
 
 int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term);
+int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term);
 
 int scoutfs_quorum_setup(struct super_block *sb);
 void scoutfs_quorum_shutdown(struct super_block *sb);
@@ -1843,53 +1843,6 @@ DEFINE_EVENT(scoutfs_server_client_count_class, scoutfs_server_client_down,
 	TP_ARGS(sb, rid, nr_clients)
 );
 
-DECLARE_EVENT_CLASS(scoutfs_server_commit_users_class,
-	TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
-		 u32 avail_before, u32 freed_before, int exceeded),
-	TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded),
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, holding)
-		__field(int, applying)
-		__field(int, nr_holders)
-		__field(__u32, avail_before)
-		__field(__u32, freed_before)
-		__field(int, exceeded)
-	),
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->holding = !!holding;
-		__entry->applying = !!applying;
-		__entry->nr_holders = nr_holders;
-		__entry->avail_before = avail_before;
-		__entry->freed_before = freed_before;
-		__entry->exceeded = !!exceeded;
-	),
-	TP_printk(SCSBF" holding %u applying %u nr %u avail_before %u freed_before %u exceeded %u",
-		  SCSB_TRACE_ARGS, __entry->holding, __entry->applying, __entry->nr_holders,
-		  __entry->avail_before, __entry->freed_before, __entry->exceeded)
-);
-DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_hold,
-	TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
-		 u32 avail_before, u32 freed_before, int exceeded),
-	TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
-);
-DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_apply,
-	TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
-		 u32 avail_before, u32 freed_before, int exceeded),
-	TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
-);
-DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_start,
-	TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
-		 u32 avail_before, u32 freed_before, int exceeded),
-	TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
-);
-DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_end,
-	TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
-		 u32 avail_before, u32 freed_before, int exceeded),
-	TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, exceeded)
-);
-
 #define slt_symbolic(mode) \
 	__print_symbolic(mode, \
 		{ SLT_CLIENT, "client" }, \
(File diff suppressed because it is too large)
@@ -64,6 +64,8 @@ int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
 				 struct scoutfs_net_lock *nl);
 int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
 					struct scoutfs_key *key);
+void scoutfs_server_hold_commit(struct super_block *sb);
+int scoutfs_server_apply_commit(struct super_block *sb, int err);
 void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
 
 int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
@@ -75,12 +77,9 @@ u64 scoutfs_server_seq(struct super_block *sb);
 u64 scoutfs_server_next_seq(struct super_block *sb);
 void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);
 
-void scoutfs_server_start(struct super_block *sb, u64 term);
+int scoutfs_server_start(struct super_block *sb, u64 term);
 void scoutfs_server_abort(struct super_block *sb);
 void scoutfs_server_stop(struct super_block *sb);
-void scoutfs_server_stop_wait(struct super_block *sb);
-bool scoutfs_server_is_running(struct super_block *sb);
-bool scoutfs_server_is_up(struct super_block *sb);
-bool scoutfs_server_is_down(struct super_block *sb);
 
 int scoutfs_server_setup(struct super_block *sb);
 void scoutfs_server_destroy(struct super_block *sb);
@@ -37,15 +37,6 @@ struct attr_funcs {
 #define ATTR_FUNCS_RO(_name) \
 	static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)
 
-static ssize_t data_device_maj_min_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-	struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
-
-	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
-			MAJOR(sb->s_bdev->bd_dev), MINOR(sb->s_bdev->bd_dev));
-}
-ATTR_FUNCS_RO(data_device_maj_min);
-
 static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
 				   char *buf)
 {
@@ -110,7 +101,6 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,
 
 
 static struct attribute *sb_id_attrs[] = {
-	&data_device_maj_min_attr_funcs.attr,
 	&format_version_attr_funcs.attr,
 	&fsid_attr_funcs.attr,
 	&rid_attr_funcs.attr,
kmod/src/xattr.c (289 lines changed)
@@ -57,6 +57,12 @@ static u32 xattr_names_equal(const char *a_name, unsigned int a_len,
 	return a_len == b_len && memcmp(a_name, b_name, a_len) == 0;
 }
 
+static unsigned int xattr_full_bytes(struct scoutfs_xattr *xat)
+{
+	return offsetof(struct scoutfs_xattr,
+			name[xat->name_len + le16_to_cpu(xat->val_len)]);
+}
+
 static unsigned int xattr_nr_parts(struct scoutfs_xattr *xat)
 {
 	return SCOUTFS_XATTR_NR_PARTS(xat->name_len,
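xattr_full_bytes() measures the header plus name plus value that the items must cover in total. A standalone sketch of the same offsetof arithmetic using a simplified, hypothetical header struct (the real struct scoutfs_xattr lives in format.h):

```c
#include <stddef.h>
#include <stdio.h>

struct xattr_hdr {
	unsigned char name_len;
	unsigned short val_len;
	char name[];	/* name bytes followed by value bytes */
};

int main(void)
{
	struct xattr_hdr xat = { .name_len = 9, .val_len = 100 };
	size_t full = offsetof(struct xattr_hdr,
			       name[xat.name_len + xat.val_len]);

	printf("header %zu + name %u + value %u = %zu bytes\n",
	       sizeof(struct xattr_hdr), xat.name_len, xat.val_len, full);
	return 0;
}
```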
@@ -131,29 +137,12 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
 }
 
-/*
- * xattrs are stored in multiple items.  The first item is a
- * concatenation of an initial header, the name, and then as much of the
- * value as fits in the remainder of the first item.  This returns the
- * size of the first item that'd store an xattr with the given name
- * length and value payload size.
- */
-static int first_item_bytes(int name_len, size_t size)
-{
-	if (WARN_ON_ONCE(name_len <= 0) ||
-	    WARN_ON_ONCE(name_len > SCOUTFS_XATTR_MAX_NAME_LEN))
-		return 0;
-
-	return min_t(int, sizeof(struct scoutfs_xattr) + name_len + size,
-		     SCOUTFS_XATTR_MAX_PART_SIZE);
-}
-
 /*
- * Find the next xattr, set the caller's key, and copy as much of the
- * first item into the callers buffer as we can.  Returns the number of
- * bytes copied which can include the header, name, and start of the
- * value from the first item.  The caller is responsible for comparing
- * their lengths, the header, and the returned length before safely
- * using the buffer.
+ * Find the next xattr and copy the key, xattr header, and as much of
+ * the name and value into the callers buffer as we can.  Returns the
+ * number of bytes copied which include the header, name, and value and
+ * can be limited by the xattr length or the callers buffer.  The caller
+ * is responsible for comparing their lengths, the header, and the
+ * returned length before safely using the xattr.
 *
 * If a name is provided then we'll iterate over items with a matching
 * name_hash until we find a matching name.  If we don't find a matching
@@ -165,17 +154,20 @@ static int first_item_bytes(int name_len, size_t size)
 * Returns -ENOENT if it didn't find a next item.
 */
 static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
-			  struct scoutfs_xattr *xat, unsigned int xat_bytes,
+			  struct scoutfs_xattr *xat, unsigned int bytes,
 			  const char *name, unsigned int name_len,
 			  u64 name_hash, u64 id, struct scoutfs_lock *lock)
 {
 	struct super_block *sb = inode->i_sb;
 	struct scoutfs_key last;
+	u8 last_part;
+	int total;
+	u8 part;
 	int ret;
 
 	/* need to be able to see the name we're looking for */
-	if (WARN_ON_ONCE(name_len > 0 &&
-			 xat_bytes < offsetof(struct scoutfs_xattr, name[name_len])))
+	if (WARN_ON_ONCE(name_len > 0 && bytes < offsetof(struct scoutfs_xattr,
+							  name[name_len])))
 		return -EINVAL;
 
 	if (name_len)
@@ -184,15 +176,26 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
 	init_xattr_key(key, scoutfs_ino(inode), name_hash, id);
 	init_xattr_key(&last, scoutfs_ino(inode), U32_MAX, U64_MAX);
 
+	last_part = 0;
+	part = 0;
+	total = 0;
+
 	for (;;) {
-		ret = scoutfs_item_next(sb, key, &last, xat, xat_bytes, lock);
-		if (ret < 0)
+		key->skx_part = part;
+		ret = scoutfs_item_next(sb, key, &last,
+					(void *)xat + total, bytes - total,
+					lock);
+		if (ret < 0) {
+			/* XXX corruption, ran out of parts */
+			if (ret == -ENOENT && part > 0)
+				ret = -EIO;
 			break;
+		}
 
 		trace_scoutfs_xattr_get_next_key(sb, key);
 
 		/* XXX corruption */
-		if (key->skx_part != 0) {
+		if (key->skx_part != part) {
 			ret = -EIO;
 			break;
 		}
@@ -202,7 +205,8 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
 		 * the first part and if the next xattr name fits in our
 		 * buffer then the item must have included it.
 		 */
-		if ((ret < sizeof(struct scoutfs_xattr) ||
+		if (part == 0 &&
+		    (ret < sizeof(struct scoutfs_xattr) ||
 		     (xat->name_len <= name_len &&
 		      ret < offsetof(struct scoutfs_xattr,
 				     name[xat->name_len])) ||
@@ -212,7 +216,7 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
 			break;
 		}
 
-		if (name_len > 0) {
+		if (part == 0 && name_len) {
 			/* ran out of names that could match */
 			if (le64_to_cpu(key->skx_name_hash) != name_hash) {
 				ret = -ENOENT;
@@ -220,126 +224,64 @@ static int get_next_xattr(struct inode *inode, struct scoutfs_key *key,
 			}
 
 			/* keep looking for our name */
-			if (!xattr_names_equal(name, name_len, xat->name, xat->name_len)) {
+			if (!xattr_names_equal(name, name_len,
+					       xat->name, xat->name_len)) {
+				part = 0;
 				le64_add_cpu(&key->skx_id, 1);
 				continue;
 			}
 
+			/* use the matching name we found */
+			last_part = xattr_nr_parts(xat) - 1;
 		}
 
-		/* found next name */
-		break;
+		total += ret;
+		if (total == bytes || part == last_part) {
+			/* copied as much as we could */
+			ret = total;
+			break;
+		}
+		part++;
 	}
 
 	return ret;
 }
 
-/*
- * The caller has already read and verified the xattr's first item.
- * Copy the value from the tail of the first item and from any future
- * items into the destination buffer.
- */
-static int copy_xattr_value(struct super_block *sb, struct scoutfs_key *xat_key,
-			    struct scoutfs_xattr *xat, int xat_bytes,
-			    char *buffer, size_t size,
-			    struct scoutfs_lock *lock)
-{
-	struct scoutfs_key key;
-	size_t copied = 0;
-	int val_tail;
-	int bytes;
-	int ret;
-	int i;
-
-	/* must have first item up to value */
-	if (WARN_ON_ONCE(xat_bytes < sizeof(struct scoutfs_xattr)) ||
-	    WARN_ON_ONCE(xat_bytes < offsetof(struct scoutfs_xattr, name[xat->name_len])))
-		return -EINVAL;
-
-	/* only ever copy up to the full value */
-	size = min_t(size_t, size, le16_to_cpu(xat->val_len));
-
-	/* must have full first item if caller needs value from second item */
-	val_tail = SCOUTFS_XATTR_MAX_PART_SIZE -
-		   offsetof(struct scoutfs_xattr, name[xat->name_len]);
-	if (WARN_ON_ONCE(size > val_tail && xat_bytes != SCOUTFS_XATTR_MAX_PART_SIZE))
-		return -EINVAL;
-
-	/* copy from tail of first item */
-	bytes = min_t(unsigned int, size, val_tail);
-	if (bytes > 0) {
-		memcpy(buffer, &xat->name[xat->name_len], bytes);
-		copied += bytes;
-	}
-
-	key = *xat_key;
-	for (i = 1; copied < size; i++) {
-		key.skx_part = i;
-		bytes = min_t(unsigned int, size - copied, SCOUTFS_XATTR_MAX_PART_SIZE);
-
-		ret = scoutfs_item_lookup(sb, &key, buffer + copied, bytes, lock);
-		if (ret >= 0 && ret != bytes)
-			ret = -EIO;
-		if (ret < 0)
-			return ret;
-
-		copied += ret;
-	}
-
-	return copied;
-}
-
-/*
- * The caller is working with items that are either in the allocated
- * first compound item or further items that are offsets into a value
- * buffer.  Give them a pointer and length of the start of the item.
- */
-static void xattr_item_part_buffer(void **buf, int *len, int part,
-				   struct scoutfs_xattr *xat, unsigned int xat_bytes,
-				   const char *value, size_t size)
-{
-	int off;
-
-	if (part == 0) {
-		*buf = xat;
-		*len = xat_bytes;
-	} else {
-		off = (part * SCOUTFS_XATTR_MAX_PART_SIZE) -
-		      offsetof(struct scoutfs_xattr, name[xat->name_len]);
-		BUG_ON(off >= size); /* calls limited by number of parts */
-		*buf = (void *)value + off;
-		*len = min_t(size_t, size - off, SCOUTFS_XATTR_MAX_PART_SIZE);
-	}
-}
-
 /*
 * Create all the items associated with the given xattr.  If this
 * returns an error it will have already cleaned up any items it created
 * before seeing the error.
 */
-static int create_xattr_items(struct inode *inode, u64 id, struct scoutfs_xattr *xat,
-			      int xat_bytes, const char *value, size_t size, u8 new_parts,
+static int create_xattr_items(struct inode *inode, u64 id,
+			      struct scoutfs_xattr *xat, unsigned int bytes,
 			      struct scoutfs_lock *lock)
 {
 	struct super_block *sb = inode->i_sb;
 	struct scoutfs_key key;
-	int ret = 0;
-	void *buf;
-	int len;
-	int i;
+	unsigned int part_bytes;
+	unsigned int total;
+	int ret;
 
 	init_xattr_key(&key, scoutfs_ino(inode),
 		       xattr_name_hash(xat->name, xat->name_len), id);
 
-	for (i = 0; i < new_parts; i++) {
-		key.skx_part = i;
-		xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);
+	total = 0;
+	ret = 0;
+	while (total < bytes) {
+		part_bytes = min_t(unsigned int, bytes - total,
+				   SCOUTFS_XATTR_MAX_PART_SIZE);
 
-		ret = scoutfs_item_create(sb, &key, buf, len, lock);
-		if (ret < 0) {
+		ret = scoutfs_item_create(sb, &key,
+					  (void *)xat + total, part_bytes,
+					  lock);
+		if (ret) {
 			while (key.skx_part-- > 0)
 				scoutfs_item_delete(sb, &key, lock);
 			break;
 		}
+
+		total += part_bytes;
+		key.skx_part++;
 	}
 
 	return ret;
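The new create loop slices one contiguous header+name+value buffer into fixed-size item parts. A standalone sketch of that split with an assumed part size; the real limit is SCOUTFS_XATTR_MAX_PART_SIZE from the format headers:

```c
#include <stdio.h>

#define PART_SIZE 976	/* hypothetical, for illustration only */

int main(void)
{
	unsigned int bytes = 3000;	/* header + name + value */
	unsigned int total = 0;
	unsigned int part = 0;

	while (total < bytes) {
		unsigned int part_bytes = bytes - total < PART_SIZE ?
					  bytes - total : PART_SIZE;

		printf("part %u: offset %u, %u bytes\n",
		       part, total, part_bytes);
		total += part_bytes;
		part++;
	}
	return 0;
}
```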
@@ -387,20 +329,20 @@ out:
|
||||
* deleted items.
|
||||
*/
|
||||
static int change_xattr_items(struct inode *inode, u64 id,
|
||||
struct scoutfs_xattr *xat, int xat_bytes,
|
||||
const char *value, size_t size,
|
||||
u8 new_parts, u8 old_parts, struct scoutfs_lock *lock)
|
||||
struct scoutfs_xattr *new_xat,
|
||||
unsigned int new_bytes, u8 new_parts,
|
||||
u8 old_parts, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_key key;
|
||||
int last_created = -1;
|
||||
void *buf;
|
||||
int len;
|
||||
int bytes;
|
||||
int off;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_xattr_key(&key, scoutfs_ino(inode),
|
||||
xattr_name_hash(xat->name, xat->name_len), id);
|
||||
xattr_name_hash(new_xat->name, new_xat->name_len), id);
|
||||
|
||||
/* dirty existing old items */
|
||||
for (i = 0; i < old_parts; i++) {
|
||||
@@ -412,10 +354,13 @@ static int change_xattr_items(struct inode *inode, u64 id,
|
||||
|
||||
/* create any new items past the old */
|
||||
for (i = old_parts; i < new_parts; i++) {
|
||||
key.skx_part = i;
|
||||
xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);
|
||||
off = i * SCOUTFS_XATTR_MAX_PART_SIZE;
|
||||
bytes = min_t(unsigned int, new_bytes - off,
|
||||
SCOUTFS_XATTR_MAX_PART_SIZE);
|
||||
|
||||
ret = scoutfs_item_create(sb, &key, buf, len, lock);
|
||||
key.skx_part = i;
|
||||
ret = scoutfs_item_create(sb, &key, (void *)new_xat + off,
|
||||
bytes, lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -424,10 +369,13 @@ static int change_xattr_items(struct inode *inode, u64 id,
|
||||
|
||||
/* update dirtied overlapping existing items, last partial first */
|
||||
for (i = min(old_parts, new_parts) - 1; i >= 0; i--) {
|
||||
key.skx_part = i;
|
||||
xattr_item_part_buffer(&buf, &len, i, xat, xat_bytes, value, size);
|
||||
off = i * SCOUTFS_XATTR_MAX_PART_SIZE;
|
||||
bytes = min_t(unsigned int, new_bytes - off,
|
||||
SCOUTFS_XATTR_MAX_PART_SIZE);
|
||||
|
||||
ret = scoutfs_item_update(sb, &key, buf, len, lock);
|
||||
key.skx_part = i;
|
||||
ret = scoutfs_item_update(sb, &key, (void *)new_xat + off,
|
||||
bytes, lock);
|
||||
/* only last partial can fail, then we unwind created */
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -464,7 +412,7 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
|
||||
struct scoutfs_xattr *xat = NULL;
|
||||
struct scoutfs_lock *lck = NULL;
|
||||
struct scoutfs_key key;
|
||||
unsigned int xat_bytes;
|
||||
unsigned int bytes;
|
||||
size_t name_len;
|
||||
int ret;
|
||||
|
||||
@@ -475,8 +423,9 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
|
||||
if (name_len > SCOUTFS_XATTR_MAX_NAME_LEN)
|
||||
return -ENODATA;
|
||||
|
||||
xat_bytes = first_item_bytes(name_len, size);
|
||||
xat = kmalloc(xat_bytes, GFP_NOFS);
|
||||
/* only need enough for caller's name and value sizes */
|
||||
bytes = sizeof(struct scoutfs_xattr) + name_len + size;
|
||||
xat = __vmalloc(bytes, GFP_NOFS, PAGE_KERNEL);
|
||||
if (!xat)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -486,32 +435,40 @@ ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
|
||||
|
||||
down_read(&si->xattr_rwsem);
|
||||
|
||||
ret = get_next_xattr(inode, &key, xat, xat_bytes, name, name_len, 0, 0, lck);
|
||||
ret = get_next_xattr(inode, &key, xat, bytes,
|
||||
name, name_len, 0, 0, lck);
|
||||
|
||||
up_read(&si->xattr_rwsem);
|
||||
scoutfs_unlock(sb, lck, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = -ENODATA;
|
||||
goto unlock;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* the caller just wants to know the size */
|
||||
if (size == 0) {
|
||||
ret = le16_to_cpu(xat->val_len);
|
||||
goto unlock;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* the caller's buffer wasn't big enough */
|
||||
if (size < le16_to_cpu(xat->val_len)) {
|
||||
ret = -ERANGE;
|
||||
goto unlock;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = copy_xattr_value(sb, &key, xat, xat_bytes, buffer, size, lck);
|
||||
unlock:
|
||||
up_read(&si->xattr_rwsem);
|
||||
scoutfs_unlock(sb, lck, SCOUTFS_LOCK_READ);
|
||||
/* XXX corruption, the items didn't match the header */
|
||||
if (ret < xattr_full_bytes(xat)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = le16_to_cpu(xat->val_len);
|
||||
memcpy(buffer, &xat->name[xat->name_len], ret);
|
||||
out:
|
||||
kfree(xat);
|
||||
vfree(xat);
|
||||
return ret;
|
||||
}
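
Both sides of the hunk preserve the usual getxattr contract: a size of
zero queries the value length, and a buffer smaller than the value
fails with -ERANGE. A minimal userspace sketch of the two-call pattern
this supports (the read_xattr helper is hypothetical, not part of the
patch):

#include <sys/xattr.h>
#include <stdlib.h>

/* query the length with size == 0, then fetch into a sized buffer */
static ssize_t read_xattr(const char *path, const char *name, char **val)
{
        ssize_t len = getxattr(path, name, NULL, 0);
        char *buf;

        if (len < 0)
                return -1;
        buf = malloc(len);
        if (!buf)
                return -1;
        len = getxattr(path, name, buf, len); /* -ERANGE if it grew */
        if (len < 0) {
                free(buf);
                return -1;
        }
        *val = buf;
        return len;
}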

@@ -639,8 +596,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
        bool undo_totl = false;
        LIST_HEAD(ind_locks);
        u8 found_parts;
        unsigned int xat_bytes_totl;
        unsigned int xat_bytes;
        unsigned int bytes;
        unsigned int val_len;
        u64 ind_seq;
        u64 total;
@@ -673,12 +629,9 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
        if (tgs.totl && ((ret = parse_totl_key(&totl_key, name, name_len)) != 0))
                return ret;

        /* allocate enough to always read an existing xattr's totl */
        xat_bytes_totl = first_item_bytes(name_len,
                                          max_t(size_t, size, SCOUTFS_XATTR_MAX_TOTL_U64));
        /* but store partial first item that only includes the new xattr's value */
        xat_bytes = first_item_bytes(name_len, size);
        xat = kmalloc(xat_bytes_totl, GFP_NOFS);
        bytes = sizeof(struct scoutfs_xattr) + name_len + size;
        /* alloc enough to read old totl value */
        xat = __vmalloc(bytes + SCOUTFS_XATTR_MAX_TOTL_U64, GFP_NOFS, PAGE_KERNEL);
        if (!xat) {
                ret = -ENOMEM;
                goto out;
@@ -692,7 +645,9 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
        down_write(&si->xattr_rwsem);

        /* find an existing xattr to delete, including possible totl value */
        ret = get_next_xattr(inode, &key, xat, xat_bytes_totl, name, name_len, 0, 0, lck);
        ret = get_next_xattr(inode, &key, xat,
                             sizeof(struct scoutfs_xattr) + name_len + SCOUTFS_XATTR_MAX_TOTL_U64,
                             name, name_len, 0, 0, lck);
        if (ret < 0 && ret != -ENOENT)
                goto unlock;

@@ -728,7 +683,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
                le64_add_cpu(&tval.total, -total);
        }

        /* prepare the xattr header, name, and start of value in first item */
        /* prepare our xattr */
        if (value) {
                if (found_parts)
                        id = le64_to_cpu(key.skx_id);
@@ -738,9 +693,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
                xat->val_len = cpu_to_le16(size);
                memset(xat->__pad, 0, sizeof(xat->__pad));
                memcpy(xat->name, name, name_len);
                memcpy(&xat->name[name_len], value,
                       min(size, SCOUTFS_XATTR_MAX_PART_SIZE -
                       offsetof(struct scoutfs_xattr, name[name_len])));
                memcpy(&xat->name[xat->name_len], value, size);

                if (tgs.totl) {
                        ret = parse_totl_u64(value, size, &total);
@@ -788,15 +741,14 @@ retry:
        }

        if (found_parts && value)
                ret = change_xattr_items(inode, id, xat, xat_bytes, value, size,
                ret = change_xattr_items(inode, id, xat, bytes,
                                         xattr_nr_parts(xat), found_parts, lck);
        else if (found_parts)
                ret = delete_xattr_items(inode, le64_to_cpu(key.skx_name_hash),
                                         le64_to_cpu(key.skx_id), found_parts,
                                         lck);
        else
                ret = create_xattr_items(inode, id, xat, xat_bytes, value, size,
                                         xattr_nr_parts(xat), lck);
                ret = create_xattr_items(inode, id, xat, bytes, lck);
        if (ret < 0)
                goto release;

@@ -826,7 +778,7 @@ unlock:
        scoutfs_unlock(sb, lck, SCOUTFS_LOCK_WRITE);
        scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
out:
        kfree(xat);
        vfree(xat);

        return ret;
}
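
The branches above give xattr writes their usual semantics: an existing
xattr with a new value changes its items in place, an existing xattr
with no value deletes its items, and anything else creates items from
scratch. A hedged userspace sketch of how each branch is reached (the
helper, names, and values are illustrative only):

#include <sys/xattr.h>

/* each call below should land in one branch of the code above */
static void exercise_xattr_set(const char *path)
{
        /* no existing parts: create_xattr_items() */
        setxattr(path, "user.example", "v1", 2, XATTR_CREATE);

        /* existing parts and a value: change_xattr_items() */
        setxattr(path, "user.example", "longer-value", 12, XATTR_REPLACE);

        /* existing parts and no value: delete_xattr_items() */
        removexattr(path, "user.example");
}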
@@ -855,7 +807,7 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
        struct scoutfs_xattr *xat = NULL;
        struct scoutfs_lock *lck = NULL;
        struct scoutfs_key key;
        unsigned int xat_bytes;
        unsigned int bytes;
        ssize_t total = 0;
        u32 name_hash = 0;
        bool is_hidden;
@@ -868,8 +820,8 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
        id = *id_pos;

        /* need a buffer large enough for all possible names */
        xat_bytes = first_item_bytes(SCOUTFS_XATTR_MAX_NAME_LEN, 0);
        xat = kmalloc(xat_bytes, GFP_NOFS);
        bytes = sizeof(struct scoutfs_xattr) + SCOUTFS_XATTR_MAX_NAME_LEN;
        xat = kmalloc(bytes, GFP_NOFS);
        if (!xat) {
                ret = -ENOMEM;
                goto out;
@@ -882,7 +834,8 @@ ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
        down_read(&si->xattr_rwsem);

        for (;;) {
                ret = get_next_xattr(inode, &key, xat, xat_bytes, NULL, 0, name_hash, id, lck);
                ret = get_next_xattr(inode, &key, xat, bytes,
                                     NULL, 0, name_hash, id, lck);
                if (ret < 0) {
                        if (ret == -ENOENT)
                                ret = total;
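
Assuming this path backs the usual listxattr(2) call, the same
size-query convention applies to listing names. A small sketch of the
caller's side (the list_names helper is hypothetical):

#include <sys/xattr.h>
#include <stdlib.h>

/* names come back concatenated, each terminated by a nul byte */
static ssize_t list_names(const char *path, char **names)
{
        ssize_t len = listxattr(path, NULL, 0); /* size query */
        char *buf;

        if (len <= 0)
                return len;
        buf = malloc(len);
        if (!buf)
                return -1;
        len = listxattr(path, buf, len);
        if (len < 0)
                free(buf);
        else
                *names = buf;
        return len;
}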

@@ -1,18 +1,5 @@
#!/usr/bin/bash

#
# This fencing script is used for testing clusters of multiple mounts on
# a single host. It finds mounts to fence by looking for their rids and
# only knows how to "fence" by using forced unmount.
#

echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"

log() {
        echo "$@" > /dev/stderr
        exit 1
}

echo_fail() {
        echo "$@" > /dev/stderr
        exit 1
@@ -20,24 +7,29 @@ echo_fail() {

rid="$SCOUTFS_FENCED_REQ_RID"

for fs in /sys/fs/scoutfs/*; do
        [ ! -d "$fs" ] && continue
#
# Look for a local mount with the rid to fence. Typically we'll at
# least find the mount with the server that requested the fence that
# we're processing. But it's possible that mounts are unmounted
# before, or while, we're running.
#
mnts=$(findmnt -l -n -t scoutfs -o TARGET) || \
        echo_fail "findmnt -t scoutfs failed" > /dev/stderr

        fs_rid="$(cat $fs/rid)" || \
                echo_fail "failed to get rid in $fs"
        if [ "$fs_rid" != "$rid" ]; then
                continue
        fi
for mnt in $mnts; do
        mnt_rid=$(scoutfs statfs -p "$mnt" -s rid) || \
                echo_fail "scoutfs statfs $mnt failed"

        nr="$(cat $fs/data_device_maj_min)" || \
                echo_fail "failed to get data device major:minor in $fs"

        mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
                echo_fail "findmnt -t scoutfs -S $nr failed"
        for mnt in $mnts; do
        if [ "$mnt_rid" == "$rid" ]; then
                umount -f "$mnt" || \
                        echo_fail "umount -f $mnt failed"
done
                        echo_fail "umount -f $mnt"

                exit 0
        fi
done

#
# If the mount doesn't exist on this host then it can't access the
# devices by definition and can be considered fenced.
#
exit 0

@@ -1,3 +0,0 @@
== starting background invalidating read/write load
== 60s of lock recovery during invalidating load
== stopping background load
@@ -45,9 +45,14 @@ generic/107
generic/117
generic/124
generic/129
generic/130
generic/131
generic/135
generic/169
generic/184
generic/211
generic/212
generic/214
generic/221
generic/228
generic/236
@@ -100,13 +105,9 @@ generic/078
generic/079
generic/081
generic/082
generic/091
generic/094
generic/096
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/118
@@ -115,9 +116,7 @@ generic/121
generic/122
generic/123
generic/128
generic/130
generic/134
generic/135
generic/136
generic/138
generic/139
@@ -165,7 +164,6 @@ generic/194
generic/195
generic/196
generic/197
generic/198
generic/199
generic/200
generic/201
@@ -173,11 +171,6 @@ generic/202
generic/203
generic/205
generic/206
generic/207
generic/210
generic/211
generic/212
generic/214
generic/216
generic/217
generic/218
@@ -185,13 +178,11 @@ generic/219
generic/220
generic/222
generic/223
generic/225
generic/227
generic/229
generic/230
generic/235
generic/238
generic/240
generic/244
generic/250
generic/252
@@ -203,7 +194,6 @@ generic/259
generic/260
generic/261
generic/262
generic/263
generic/264
generic/265
generic/266
@@ -282,4 +272,4 @@ shared/004
shared/032
shared/051
shared/289
Passed all 75 tests
Passed all 80 tests

@@ -380,14 +380,13 @@ cmd grep . /sys/kernel/debug/tracing/options/trace_printk \
# Build a fenced config that runs scripts out of the repository rather
# than the default system directory
#
conf="$T_RESULTS/scoutfs-fenced.conf"
conf="$T_RESULTS/scoutfs-fencd.conf"
cat > $conf << EOF
SCOUTFS_FENCED_DELAY=1
SCOUTFS_FENCED_RUN=$T_TESTS/fenced-local-force-unmount.sh
SCOUTFS_FENCED_RUN_ARGS="ignored run args"
SCOUTFS_FENCED_RUN_ARGS=""
EOF
export SCOUTFS_FENCED_CONFIG_FILE="$conf"
T_FENCED_LOG="$T_RESULTS/fenced.log"

#
# Run the agent in the background, log its output, and kill it if we
@@ -395,7 +394,7 @@ T_FENCED_LOG="$T_RESULTS/fenced.log"
#
fenced_log()
{
        echo "[$(timestamp)] $*" >> "$T_FENCED_LOG"
        echo "[$(timestamp)] $*" >> "$T_RESULTS/fenced.stdout.log"
}
fenced_pid=""
kill_fenced()
@@ -406,7 +405,7 @@ kill_fenced()
        fi
}
trap kill_fenced EXIT
$T_UTILS/fenced/scoutfs-fenced > "$T_FENCED_LOG" 2>&1 &
$T_UTILS/fenced/scoutfs-fenced > "$T_RESULTS/fenced.stdout.log" 2> "$T_RESULTS/fenced.stderr.log" &
fenced_pid=$!
fenced_log "started fenced pid $fenced_pid in the background"


@@ -17,7 +17,6 @@ lock-refleak.sh
lock-shrink-consistency.sh
lock-pr-cw-conflict.sh
lock-revoke-getcwd.sh
lock-recover-invalidate.sh
export-lookup-evict-race.sh
createmany-parallel.sh
createmany-large-names.sh

@@ -45,18 +45,6 @@ check_read_write()
        fi
}

# verify that fenced ran our testing fence script
verify_fenced_run()
{
        local rids="$@"
        local rid

        for rid in $rids; do
                grep -q ".* running rid '$rid'.* args 'ignored run args'" "$T_FENCED_LOG" || \
                        t_fail "fenced didn't execute RUN script for rid $rid"
        done
}

echo "== make sure all mounts can see each other"
check_read_write

@@ -74,14 +62,12 @@ done
while t_rid_is_fencing $rid; do
        sleep .5
done
verify_fenced_run $rid
t_mount $cl
check_read_write

echo "== force unmount all non-server, connection timeout, fence nop, mount"
sv=$(t_server_nr)
pattern="nonsense"
rids=""
sync
for cl in $(t_fs_nrs); do
        if [ $cl == $sv ]; then
@@ -89,7 +75,6 @@ for cl in $(t_fs_nrs); do
        fi

        rid=$(t_mount_rid $cl)
        rids="$rids $rid"
        pattern="$pattern|$rid"
        echo "cl $cl sv $sv rid $rid" >> "$T_TMP.log"

@@ -104,7 +89,6 @@ done
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
        sleep .5
done
verify_fenced_run $rids
# remount all the clients
for cl in $(t_fs_nrs); do
        if [ $cl == $sv ]; then
@@ -125,17 +109,11 @@ t_wait_for_leader
while t_rid_is_fencing $rid; do
        sleep .5
done
verify_fenced_run $rid
t_mount $sv
check_read_write

echo "== force unmount everything, new server fences all previous"
sync
rids=""
# get rids before forced unmount breaks scoutfs statfs
for nr in $(t_fs_nrs); do
        rids="$rids $(t_mount_rid $nr)"
done
for nr in $(t_fs_nrs); do
        t_force_umount $nr
done
@@ -144,7 +122,6 @@ t_mount_all
while test -d $(echo /sys/fs/scoutfs/*/fence/* | cut -d " " -f 1); do
        sleep .5
done
verify_fenced_run $rids
check_read_write

t_pass

@@ -1,43 +0,0 @@
#
# trigger server failover and lock recovery during heavy invalidating
# load on multiple mounts
#

majority_nr=$(t_majority_count)
quorum_nr=$T_QUORUM

test "$quorum_nr" == "$majority_nr" && \
        t_skip "need remaining majority when leader unmounted"

test "$T_NR_MOUNTS" -lt "$((quorum_nr + 2))" && \
        t_skip "need at least 2 non-quorum load mounts"

echo "== starting background invalidating read/write load"
touch "$T_D0/file"
load_pids=""
for i in $(t_fs_nrs); do
        if [ "$i" -ge "$quorum_nr" ]; then
                eval path="\$T_D${i}/file"

                (while true; do touch $path > /dev/null 2>&1; done) &
                load_pids="$load_pids $!"
                (while true; do stat $path > /dev/null 2>&1; done) &
                load_pids="$load_pids $!"
        fi
done

# had it reproduce in ~40s on wimpy debug kernel guests
LENGTH=60
echo "== ${LENGTH}s of lock recovery during invalidating load"
END=$((SECONDS + LENGTH))
while [ "$SECONDS" -lt "$END" ]; do
        sv=$(t_server_nr)
        t_umount $sv
        t_mount $sv
        # new server had to process greeting for mount to finish
done

echo "== stopping background load"
kill $load_pids

t_pass
@@ -64,19 +64,29 @@ generic/029 # mmap missing
generic/030 # mmap missing
generic/075 # file content mismatch failures (fds, etc)
generic/080 # mmap missing
generic/091 # skip fsx tests
generic/094 # odirect streaming pre-alloc treated as failure in xfstests
generic/103 # enospc causes trans commit failures
generic/105 # needs triage: something about acls
generic/108 # mount fails on failing device?
generic/112 # file content mismatch failures (fds, etc)
generic/113 # block aio dio runs
generic/114 # block aio dio runs
generic/120 # (can't exec 'cause no mmap)
generic/126 # (can't exec 'cause no mmap)
generic/141 # mmap missing
generic/198 # block aio dio runs
generic/207 # block aio dio runs
generic/210 # block aio dio runs
generic/213 # enospc causes trans commit failures
generic/215 # mmap missing
generic/225 # odirect streaming pre-alloc treated as failure in xfstests
generic/237 # wrong error return from failing setfacl?
generic/240 # block aio dio runs
generic/246 # mmap missing
generic/247 # mmap missing
generic/248 # mmap missing
generic/263 # do not support allocate mode FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE, FALLOC_FL_ZERO_RANGE...
generic/319 # utils output change? update branch?
generic/321 # requires selinux enabled for '+' in ls?
generic/325 # mmap missing

@@ -55,21 +55,9 @@ test -x "$SCOUTFS_FENCED_RUN" || \
        error_exit "SCOUTFS_FENCED_RUN '$SCOUTFS_FENCED_RUN' isn't executable"

#
# Main loop watching for fence requests across all filesystems. The
# server can shut down without waiting for pending fence requests to
# finish. All of the interaction with the fence directory and files can
# fail at any moment. We will generate log messages when the dir or
# files disappear.
# main loop watching for fence requests across all filesystems
#

# generate failure messages to stderr while still echoing 0 for the caller
careful_cat()
{
        local path="$@"

        cat "$@" || echo 0
}

while sleep $SCOUTFS_FENCED_DELAY; do
        for fence in /sys/fs/scoutfs/*/fence/*; do
                # catches unmatched regex when no dirs
@@ -78,8 +66,7 @@ while sleep $SCOUTFS_FENCED_DELAY; do
                fi

                # skip requests that have been handled
                if [ "$(careful_cat $fence/fenced)" == 1 -o \
                     "$(careful_cat $fence/error)" == 1 ]; then
                if [ $(cat "$fence/fenced") == 1 -o $(cat "$fence/error") == 1 ]; then
                        continue
                fi

@@ -94,10 +81,10 @@ while sleep $SCOUTFS_FENCED_DELAY; do
                export SCOUTFS_FENCED_REQ_RID="$rid"
                export SCOUTFS_FENCED_REQ_IP="$ip"

                $SCOUTFS_FENCED_RUN $SCOUTFS_FENCED_RUN_ARGS
                $run $SCOUTFS_FENCED_RUN_ARGS
                rc=$?
                if [ "$rc" != 0 ]; then
                        log_message "server $srv fencing rid $rid saw error status $rc"
                        log_message "server $srv fencing rid $rid saw error status $rc from $run"
                        echo 1 > "$fence/error"
                        continue
                fi
