scoutfs: avoid reading forest roots with block IO

The forest item operations were reading the super block to find the
roots that they should read items from.

This was the easiest way to get it working, but it is too expensive: we
have to find the roots for every newly acquired lock and for every call
that walks the inode seq indexes.

To avoid all these reads, we first send the current stable versions of
the fs and logs btree roots along with lock grants.  Then we add a net
command to get the current stable roots from the server.  This is used
to refresh the roots when stale blocks are encountered and for the seq
index queries.

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown
2020-05-07 11:21:27 -07:00
committed by Zach Brown
parent 304dbbbafa
commit b7943c5412
11 changed files with 190 additions and 71 deletions
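
At the core of the change is the client-side policy for picking which
stable roots to read from, condensed here from the forest.c changes
below: the first refresh under a lock uses the roots that arrived with
the grant, and only later refreshes fall back to the new GET_FS_ROOTS
request.  A minimal sketch of that decision (identifiers follow the
diff; choose_stable_roots() is a hypothetical wrapper, and the locking,
bloom checks, and tracing are stripped out):

	/* Sketch: pick stable roots without reading any blocks. */
	static int choose_stable_roots(struct super_block *sb,
				       struct scoutfs_lock *lock,
				       struct forest_lock_private *lpriv,
				       struct scoutfs_btree_root *fs_root,
				       struct scoutfs_btree_root *logs_root)
	{
		int ret = 0;

		if (!lpriv->used_lock_roots) {
			/* first refresh: roots sent with the lock grant */
			lpriv->used_lock_roots = 1;
			*fs_root = lock->fs_root;
			*logs_root = lock->logs_root;
		} else {
			/* later refreshes: ask the server for newer roots */
			ret = scoutfs_client_get_fs_roots(sb, fs_root,
							  logs_root);
		}

		return ret;
	}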

View File

@@ -108,6 +108,24 @@ int scoutfs_client_commit_log_trees(struct super_block *sb,
lt, sizeof(*lt), NULL, 0);
}
+int scoutfs_client_get_fs_roots(struct super_block *sb,
+				struct scoutfs_btree_root *fs_root,
+				struct scoutfs_btree_root *logs_root)
+{
+	struct client_info *client = SCOUTFS_SB(sb)->client_info;
+	struct scoutfs_net_fs_roots nfr;
+	int ret;
+
+	ret = scoutfs_net_sync_request(sb, client->conn,
+				       SCOUTFS_NET_CMD_GET_FS_ROOTS,
+				       NULL, 0, &nfr, sizeof(nfr));
+	if (ret == 0) {
+		*fs_root = nfr.fs_root;
+		*logs_root = nfr.logs_root;
+	}
+
+	return ret;
+}
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
@@ -157,7 +175,7 @@ static int client_lock_response(struct super_block *sb,
void *resp, unsigned int resp_len,
int error, void *data)
{
-	if (resp_len != sizeof(struct scoutfs_net_lock))
+	if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
return -EINVAL;
/* XXX error? */

View File

@@ -7,6 +7,9 @@ int scoutfs_client_get_log_trees(struct super_block *sb,
struct scoutfs_log_trees *lt);
int scoutfs_client_commit_log_trees(struct super_block *sb,
struct scoutfs_log_trees *lt);
+int scoutfs_client_get_fs_roots(struct super_block *sb,
+				struct scoutfs_btree_root *fs_root,
+				struct scoutfs_btree_root *logs_root);
u64 *scoutfs_client_bulk_alloc(struct super_block *sb);
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq);
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq);

View File

@@ -42,6 +42,9 @@
EXPAND_COUNTER(dentry_revalidate_root) \
EXPAND_COUNTER(dentry_revalidate_valid) \
EXPAND_COUNTER(dir_backref_excessive_retries) \
+	EXPAND_COUNTER(forest_roots_next_hint)				\
+	EXPAND_COUNTER(forest_roots_lock)				\
+	EXPAND_COUNTER(forest_roots_server)				\
EXPAND_COUNTER(lock_alloc) \
EXPAND_COUNTER(lock_free) \
EXPAND_COUNTER(lock_grace_elapsed) \

View File

@@ -24,6 +24,7 @@
#include "radix.h"
#include "block.h"
#include "forest.h"
#include "counters.h"
#include "scoutfs_trace.h"
/*
@@ -76,7 +77,7 @@ struct forest_root {
u64 nr;
};
-struct forest_super_refs {
+struct forest_refs {
struct scoutfs_btree_ref fs_ref;
struct scoutfs_btree_ref logs_ref;
} __packed;
@@ -93,6 +94,7 @@ struct forest_bloom_nrs {
struct forest_lock_private {
u64 last_refreshed;
struct rw_semaphore rwsem;
+	unsigned int used_lock_roots:1;
struct list_head roots;
struct forest_root fs_root;
struct forest_root our_log_root;
@@ -283,28 +285,29 @@ static struct scoutfs_block *read_bloom_ref(struct super_block *sb,
*
* This doesn't deal with rereading stale blocks itself.. it returns
* ESTALE to the caller who already has to deal with retrying stale
- * blocks from their btree reads. We give them the super refs we read
- * so that they can identify persistent stale block errors that come
- * from corruption.
+ * blocks from their btree reads. We give them the refs we read so that
+ * they can identify persistent stale block errors that come from
+ * corruption.
*
- * Because we're starting all the reads from a stable read super this
- * will not see any dirty blocks we have in memory. We don't have to
- * lock any of the btree reads. It also won't find the currently dirty
- * version of our log btree. Writers mark our static log btree in lpriv
- * to indicate that we should include our dirty log btree in reads.
- * We'll also naturally add it if we see a persistent version on disk
- * with all of the bloom bits set.
+ * Because we're starting all the reads from stable refs from the
+ * server, this will not see any dirty blocks we have in memory. We
+ * don't have to lock any of the btree reads. It also won't find the
+ * currently dirty version of our log btree. Writers mark our static
+ * log btree in lpriv to indicate that we should include our dirty log
+ * btree in reads. We'll also naturally add it if we see a persistent
+ * version on disk with all of the bloom bits set.
*/
static int refresh_bloom_roots(struct super_block *sb,
struct scoutfs_lock *lock,
-			       struct forest_super_refs *srefs)
+			       struct forest_refs *refs)
{
DECLARE_FOREST_INFO(sb, finf);
struct forest_lock_private *lpriv = ACCESS_ONCE(lock->forest_private);
+	struct scoutfs_btree_root fs_root;
+	struct scoutfs_btree_root logs_root;
struct scoutfs_log_trees_val ltv;
SCOUTFS_BTREE_ITEM_REF(iref);
struct forest_bloom_nrs bloom;
-	struct scoutfs_super_block super;
struct forest_root *fr = NULL;
struct scoutfs_bloom_block *bb;
struct scoutfs_block *bl;
@@ -312,26 +315,36 @@ static int refresh_bloom_roots(struct super_block *sb,
int ret;
int i;
-	memset(srefs, 0, sizeof(*srefs));
+	memset(refs, 0, sizeof(*refs));
+
+	down_write(&lpriv->rwsem);
/* empty the list so no one iterates until someone's added */
clear_roots(lpriv);
-	ret = scoutfs_read_super(sb, &super);
-	if (ret)
-		goto out;
+	/* first use the lock's constant roots, then sample newer roots */
+	if (!lpriv->used_lock_roots) {
+		lpriv->used_lock_roots = 1;
+		fs_root = lock->fs_root;
+		logs_root = lock->logs_root;
+		scoutfs_inc_counter(sb, forest_roots_lock);
+	} else {
+		ret = scoutfs_client_get_fs_roots(sb, &fs_root, &logs_root);
+		if (ret)
+			goto out;
+		scoutfs_inc_counter(sb, forest_roots_server);
+	}
-	trace_scoutfs_forest_read_super(sb, &super);
-	srefs->fs_ref = super.fs_root.ref;
-	srefs->logs_ref = super.logs_root.ref;
+	trace_scoutfs_forest_using_roots(sb, &fs_root, &logs_root);
+	refs->fs_ref = fs_root.ref;
+	refs->logs_ref = logs_root.ref;
calc_bloom_nrs(&bloom, &lock->start);
scoutfs_key_init_log_trees(&key, 0, 0);
for (;; scoutfs_key_inc(&key)) {
-		ret = scoutfs_btree_next(sb, &super.logs_root, &key, &iref);
+		ret = scoutfs_btree_next(sb, &logs_root, &key, &iref);
if (ret == -ENOENT) {
ret = 0;
break;
@@ -408,7 +421,7 @@ static int refresh_bloom_roots(struct super_block *sb,
/* always add the fs root at the tail */
fr = &lpriv->fs_root;
-	fr->item_root = super.fs_root;
+	fr->item_root = fs_root;
fr->rid = 0;
fr->nr = 0;
list_add_tail(&fr->entry, &lpriv->roots);
@@ -420,14 +433,15 @@ static int refresh_bloom_roots(struct super_block *sb,
out:
if (ret < 0)
clear_roots(lpriv);
+	up_write(&lpriv->rwsem);
return ret;
}
-/* initialize some super refs that initially aren't equal */
-#define DECLARE_STALE_TRACKING_SUPER_REFS(a, b)			\
-	struct forest_super_refs a = {{cpu_to_le64(0),}};	\
-	struct forest_super_refs b = {{cpu_to_le64(1),}}
+/* initialize some refs that initially aren't equal */
+#define DECLARE_STALE_TRACKING_SUPER_REFS(a, b)			\
+	struct forest_refs a = {{cpu_to_le64(0),}};		\
+	struct forest_refs b = {{cpu_to_le64(1),}}
/*
* The caller saw stale blocks. If they're seeing the same root refs
@@ -439,19 +453,16 @@ out:
*/
static int refresh_check_stale(struct super_block *sb,
struct scoutfs_lock *lock,
-			       struct forest_super_refs *prev_srefs,
-			       struct forest_super_refs *srefs)
+			       struct forest_refs *prev_refs,
+			       struct forest_refs *refs)
{
struct forest_lock_private *lpriv = ACCESS_ONCE(lock->forest_private);
int ret;
-	if (memcmp(prev_srefs, srefs, sizeof(*srefs)) == 0)
+	if (memcmp(prev_refs, refs, sizeof(*refs)) == 0)
return -EIO;
-	*prev_srefs = *srefs;
+	*prev_refs = *refs;
-	down_write(&lpriv->rwsem);
-	ret = refresh_bloom_roots(sb, lock, srefs);
-	up_write(&lpriv->rwsem);
+	ret = refresh_bloom_roots(sb, lock, refs);
if (ret == -ESTALE)
ret = 0;
@@ -563,7 +574,7 @@ int scoutfs_forest_lookup(struct super_block *sb, struct scoutfs_key *key,
struct kvec *val, struct scoutfs_lock *lock)
{
DECLARE_FOREST_INFO(sb, finf);
-	DECLARE_STALE_TRACKING_SUPER_REFS(prev_srefs, srefs);
+	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
struct forest_lock_private *lpriv;
SCOUTFS_BTREE_ITEM_REF(iref);
struct forest_root *fr;
@@ -625,7 +636,7 @@ retry:
up_read(&lpriv->rwsem);
if (err == -ESTALE) {
-		err = refresh_check_stale(sb, lock, &prev_srefs, &srefs);
+		err = refresh_check_stale(sb, lock, &prev_refs, &refs);
if (err == 0)
goto retry;
ret = err;
@@ -823,7 +834,7 @@ static int forest_iter(struct super_block *sb, struct scoutfs_key *key,
struct scoutfs_key *end, struct kvec *val,
struct scoutfs_lock *lock, bool fwd)
{
-	DECLARE_STALE_TRACKING_SUPER_REFS(prev_srefs, srefs);
+	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
struct forest_lock_private *lpriv;
DECLARE_FOREST_INFO(sb, finf);
SCOUTFS_BTREE_ITEM_REF(iref);
@@ -964,7 +975,7 @@ unlock:
}
if (ret == -ESTALE) {
-		ret = refresh_check_stale(sb, lock, &prev_srefs, &srefs);
+		ret = refresh_check_stale(sb, lock, &prev_refs, &refs);
if (ret == 0)
goto retry;
}
@@ -1001,8 +1012,8 @@ int scoutfs_forest_prev(struct super_block *sb, struct scoutfs_key *key,
* This is an unlocked iteration across all the btrees to find a hint at
* the next key that the caller could read. It's used to find out what
* next key range to lock, presuming you're allowed to only see items
- * that have been synced. We read the super every time to get the most
- * recent trees.
+ * that have been synced. We ask the server for the current roots to
+ * check.
*
* We don't bother skipping deletion or reservation items here. They're
* unlikely. The caller will iterate them over safely and call again to
@@ -1014,8 +1025,9 @@ int scoutfs_forest_prev(struct super_block *sb, struct scoutfs_key *key,
int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
struct scoutfs_key *next)
{
-	DECLARE_STALE_TRACKING_SUPER_REFS(prev_srefs, srefs);
-	struct scoutfs_super_block super;
+	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
+	struct scoutfs_btree_root fs_root;
+	struct scoutfs_btree_root logs_root;
struct scoutfs_log_trees_val ltv;
SCOUTFS_BTREE_ITEM_REF(iref);
struct scoutfs_key found;
@@ -1024,19 +1036,21 @@ int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
int ret;
retry:
-	ret = scoutfs_read_super(sb, &super);
+	scoutfs_inc_counter(sb, forest_roots_next_hint);
+	ret = scoutfs_client_get_fs_roots(sb, &fs_root, &logs_root);
if (ret)
goto out;
-	srefs.fs_ref = super.fs_root.ref;
-	srefs.logs_ref = super.logs_root.ref;
+	trace_scoutfs_forest_using_roots(sb, &fs_root, &logs_root);
+	refs.fs_ref = fs_root.ref;
+	refs.logs_ref = logs_root.ref;
scoutfs_key_init_log_trees(&ltk, 0, 0);
have_next = false;
for (;; scoutfs_key_inc(&ltk)) {
-		ret = scoutfs_btree_next(sb, &super.logs_root, &ltk, &iref);
+		ret = scoutfs_btree_next(sb, &logs_root, &ltk, &iref);
if (ret == -ENOENT) {
if (have_next)
ret = 0;
@@ -1075,9 +1089,9 @@ retry:
}
if (ret == -ESTALE) {
-		if (memcmp(&prev_srefs, &srefs, sizeof(srefs)) == 0)
+		if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
return -EIO;
-		prev_srefs = srefs;
+		prev_refs = refs;
goto retry;
}
out:
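
The retry protocol shared by the locked readers and
scoutfs_forest_next_hint() is worth spelling out: retry ESTALE with
freshly fetched roots, but give up with EIO once two consecutive
fetches return identical refs, since refs that don't change while still
producing stale reads point at corruption rather than a racing commit.
A minimal sketch of the loop, with a hypothetical read_items() standing
in for the btree reads:

	/* Sketch of the ESTALE convergence test; read_items() is made up. */
	static int read_with_retries(struct super_block *sb)
	{
		struct forest_refs prev_refs = {{cpu_to_le64(0),}};
		struct forest_refs refs = {{cpu_to_le64(1),}};
		int ret;

	retry:
		ret = read_items(sb, &refs);	/* records refs it read from */
		if (ret == -ESTALE) {
			/* same refs gave ESTALE twice in a row: give up */
			if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
				return -EIO;
			prev_refs = refs;
			goto retry;
		}
		return ret;
	}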

View File

@@ -683,6 +683,7 @@ enum {
SCOUTFS_NET_CMD_ALLOC_INODES,
SCOUTFS_NET_CMD_GET_LOG_TREES,
SCOUTFS_NET_CMD_COMMIT_LOG_TREES,
+	SCOUTFS_NET_CMD_GET_FS_ROOTS,
SCOUTFS_NET_CMD_ADVANCE_SEQ,
SCOUTFS_NET_CMD_GET_LAST_SEQ,
SCOUTFS_NET_CMD_STATFS,
@@ -731,6 +732,11 @@ struct scoutfs_net_statfs {
__u8 uuid[SCOUTFS_UUID_BYTES]; /* logical volume uuid */
} __packed;
+struct scoutfs_net_fs_roots {
+	struct scoutfs_btree_root fs_root;
+	struct scoutfs_btree_root logs_root;
+} __packed;
struct scoutfs_net_lock {
struct scoutfs_key key;
__le64 write_version;
@@ -738,6 +744,11 @@ struct scoutfs_net_lock {
__u8 new_mode;
} __packed;
+struct scoutfs_net_lock_grant_response {
+	struct scoutfs_net_lock nl;
+	struct scoutfs_net_fs_roots nfr;
+} __packed;
struct scoutfs_net_lock_recover {
__le16 nr;
struct scoutfs_net_lock locks[0];
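
Since every piece here is __packed, the grant response is exactly the
lock message followed by the roots with no padding in between.  An
illustrative compile-time check (not part of the commit) of the wire
layout that client_lock_response() depends on when validating resp_len:

	/* illustrative: grant response = lock message + roots, back to back */
	BUILD_BUG_ON(sizeof(struct scoutfs_net_lock_grant_response) !=
		     sizeof(struct scoutfs_net_lock) +
		     sizeof(struct scoutfs_net_fs_roots));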

View File

@@ -556,8 +556,9 @@ static void extend_grace(struct super_block *sb, struct scoutfs_lock *lock)
* period anyway as they unlock.
*/
int scoutfs_lock_grant_response(struct super_block *sb,
-				struct scoutfs_net_lock *nl)
+				struct scoutfs_net_lock_grant_response *gr)
{
+	struct scoutfs_net_lock *nl = &gr->nl;
DECLARE_LOCK_INFO(sb, linfo);
struct scoutfs_lock *lock;
@@ -589,6 +590,8 @@ int scoutfs_lock_grant_response(struct super_block *sb,
lock->request_pending = 0;
lock->mode = nl->new_mode;
lock->write_version = le64_to_cpu(nl->write_version);
+	lock->fs_root = gr->nfr.fs_root;
+	lock->logs_root = gr->nfr.logs_root;
if (lock_count_match_exists(nl->new_mode, lock->waiters))
extend_grace(sb, lock);

View File

@@ -22,6 +22,8 @@ struct scoutfs_lock {
struct rb_node range_node;
u64 refresh_gen;
u64 write_version;
+	struct scoutfs_btree_root fs_root;
+	struct scoutfs_btree_root logs_root;
struct list_head lru_head;
wait_queue_head_t waitq;
struct work_struct shrink_work;
@@ -49,7 +51,7 @@ struct scoutfs_lock_coverage {
};
int scoutfs_lock_grant_response(struct super_block *sb,
-				struct scoutfs_net_lock *nl);
+				struct scoutfs_net_lock_grant_response *gr);
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl);
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,

View File

@@ -489,6 +489,7 @@ static int process_waiting_requests(struct super_block *sb,
struct server_lock_node *snode)
{
DECLARE_LOCK_SERVER_INFO(sb, inf);
+	struct scoutfs_net_lock_grant_response gres;
struct scoutfs_net_lock nl;
struct client_lock_entry *req;
struct client_lock_entry *req_tmp;
@@ -552,8 +553,12 @@ static int process_waiting_requests(struct super_block *sb,
nl.write_version = cpu_to_le64(wv);
}
+		gres.nl = nl;
+		scoutfs_server_get_fs_roots(sb, &gres.nfr.fs_root,
+					    &gres.nfr.logs_root);
ret = scoutfs_server_lock_response(sb, req->rid,
-						   req->net_id, &nl);
+						   req->net_id, &gres);
if (ret)
goto out;

View File

@@ -2044,12 +2044,12 @@ TRACE_EVENT(scoutfs_forest_prepare_commit,
__entry->bloom_blkno, __entry->bloom_seq)
);
-TRACE_EVENT(scoutfs_forest_read_super,
-	TP_PROTO(struct super_block *sb, struct scoutfs_super_block *super),
-	TP_ARGS(sb, super),
+TRACE_EVENT(scoutfs_forest_using_roots,
+	TP_PROTO(struct super_block *sb, struct scoutfs_btree_root *fs_root,
+		 struct scoutfs_btree_root *logs_root),
+	TP_ARGS(sb, fs_root, logs_root),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
-		__field(__u64, hdr_seq)
__field(__u64, fs_blkno)
__field(__u64, fs_seq)
__field(__u64, logs_blkno)
@@ -2057,15 +2057,14 @@ TRACE_EVENT(scoutfs_forest_read_super,
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
-		__entry->hdr_seq = le64_to_cpu(super->hdr.seq);
-		__entry->fs_blkno = le64_to_cpu(super->fs_root.ref.blkno);
-		__entry->fs_seq = le64_to_cpu(super->fs_root.ref.seq);
-		__entry->logs_blkno = le64_to_cpu(super->logs_root.ref.blkno);
-		__entry->logs_seq = le64_to_cpu(super->logs_root.ref.seq);
+		__entry->fs_blkno = le64_to_cpu(fs_root->ref.blkno);
+		__entry->fs_seq = le64_to_cpu(fs_root->ref.seq);
+		__entry->logs_blkno = le64_to_cpu(logs_root->ref.blkno);
+		__entry->logs_seq = le64_to_cpu(logs_root->ref.seq);
),
TP_printk(SCSBF" hdr seq %llu fs blkno %llu seq %llu logs blkno %llu seq %llu",
SCSB_TRACE_ARGS, __entry->hdr_seq, __entry->fs_blkno,
__entry->fs_seq, __entry->logs_blkno, __entry->logs_seq)
TP_printk(SCSBF" fs blkno %llu seq %llu logs blkno %llu seq %llu",
SCSB_TRACE_ARGS, __entry->fs_blkno, __entry->fs_seq,
__entry->logs_blkno, __entry->logs_seq)
);
TRACE_EVENT(scoutfs_forest_add_root,

View File

@@ -84,6 +84,11 @@ struct server_info {
struct scoutfs_block_writer wri;
struct mutex logs_mutex;
+	/* stable versions stored from commits, given in locks and rpcs */
+	seqcount_t fs_roots_seqcount;
+	struct scoutfs_btree_root fs_root;
+	struct scoutfs_btree_root logs_root;
};
#define DECLARE_SERVER_INFO(sb, name) \
@@ -216,6 +221,32 @@ static void update_free_blocks(__le64 *blocks, struct scoutfs_radix_root *prev,
le64_to_cpu(prev->ref.sm_total));
}
+void scoutfs_server_get_fs_roots(struct super_block *sb,
+				 struct scoutfs_btree_root *fs_root,
+				 struct scoutfs_btree_root *logs_root)
+{
+	DECLARE_SERVER_INFO(sb, server);
+	unsigned int seq;
+
+	do {
+		seq = read_seqcount_begin(&server->fs_roots_seqcount);
+		*fs_root = server->fs_root;
+		*logs_root = server->logs_root;
+	} while (read_seqcount_retry(&server->fs_roots_seqcount, seq));
+}
+
+static void set_fs_roots(struct server_info *server,
+			 struct scoutfs_btree_root *fs_root,
+			 struct scoutfs_btree_root *logs_root)
+{
+	preempt_disable();
+	write_seqcount_begin(&server->fs_roots_seqcount);
+	server->fs_root = *fs_root;
+	server->logs_root = *logs_root;
+	write_seqcount_end(&server->fs_roots_seqcount);
+	preempt_enable();
+}
/*
* Concurrent request processing dirties blocks in a commit and makes
* the modifications persistent before replying. We'd like to batch
@@ -270,6 +301,7 @@ static void scoutfs_server_commit_func(struct work_struct *work)
}
server->prepared_commit = false;
+	set_fs_roots(server, &super->fs_root, &super->logs_root);
ret = 0;
out:
node = llist_del_all(&server->commit_waiters);
@@ -534,6 +566,29 @@ out:
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
}
+/*
+ * Give the client the most recent version of the fs btrees that are
+ * visible in persistent storage. We don't want to accidentally give
+ * them our in-memory dirty version. This can be racing with commits.
+ */
+static int server_get_fs_roots(struct super_block *sb,
+			       struct scoutfs_net_connection *conn,
+			       u8 cmd, u64 id, void *arg, u16 arg_len)
+{
+	struct scoutfs_net_fs_roots nfr;
+	int ret;
+
+	if (arg_len != 0) {
+		memset(&nfr, 0, sizeof(nfr));
+		ret = -EINVAL;
+	} else {
+		scoutfs_server_get_fs_roots(sb, &nfr.fs_root, &nfr.logs_root);
+		ret = 0;
+	}
+
+	return scoutfs_net_response(sb, conn, cmd, id, ret, &nfr, sizeof(nfr));
+}
/*
* A client is being evicted so we want to reclaim resources from their
* log tree items. The item trees and bloom refs stay around to be read
@@ -863,14 +918,14 @@ int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
lock_response, NULL, NULL);
}
-int scoutfs_server_lock_response(struct super_block *sb, u64 rid,
-				 u64 id, struct scoutfs_net_lock *nl)
+int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
+				 struct scoutfs_net_lock_grant_response *gr)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
return scoutfs_net_response_node(sb, server->conn, rid,
SCOUTFS_NET_CMD_LOCK, id, 0,
-					nl, sizeof(*nl));
+					gr, sizeof(*gr));
}
static bool invalid_recover(struct scoutfs_net_lock_recover *nlr,
@@ -1328,6 +1383,7 @@ static scoutfs_net_request_t server_req_funcs[] = {
[SCOUTFS_NET_CMD_ALLOC_INODES] = server_alloc_inodes,
[SCOUTFS_NET_CMD_GET_LOG_TREES] = server_get_log_trees,
[SCOUTFS_NET_CMD_COMMIT_LOG_TREES] = server_commit_log_trees,
+	[SCOUTFS_NET_CMD_GET_FS_ROOTS] = server_get_fs_roots,
[SCOUTFS_NET_CMD_ADVANCE_SEQ] = server_advance_seq,
[SCOUTFS_NET_CMD_GET_LAST_SEQ] = server_get_last_seq,
[SCOUTFS_NET_CMD_STATFS] = server_statfs,
@@ -1418,6 +1474,7 @@ static void scoutfs_server_worker(struct work_struct *work)
if (ret < 0)
goto shutdown;
+	set_fs_roots(server, &super->fs_root, &super->logs_root);
scoutfs_radix_init_alloc(&server->alloc, &super->core_meta_avail,
&super->core_meta_freed);
scoutfs_block_writer_init(sb, &server->wri);
@@ -1557,6 +1614,7 @@ int scoutfs_server_setup(struct super_block *sb)
INIT_LIST_HEAD(&server->farewell_requests);
INIT_WORK(&server->farewell_work, farewell_worker);
mutex_init(&server->logs_mutex);
+	seqcount_init(&server->fs_roots_seqcount);
server->wq = alloc_workqueue("scoutfs_server",
WQ_UNBOUND | WQ_NON_REENTRANT, 0);
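
One subtlety in set_fs_roots() above: the seqcount write section is
wrapped in preempt_disable()/preempt_enable().  The sequence count is
odd while an update is in flight and readers spin until it goes even
again, so a writer preempted mid-update would leave readers on that CPU
spinning until it gets to run again.  A minimal sketch of the
writer-side rule (publish_value() is a hypothetical helper, not from
the commit):

	/* Sketch: seqcount writers must not be preempted mid-update. */
	static void publish_value(seqcount_t *seq, u64 *slot, u64 val)
	{
		preempt_disable();		/* keep the update atomic on this CPU */
		write_seqcount_begin(seq);	/* count goes odd */
		*slot = val;			/* update the published snapshot */
		write_seqcount_end(seq);	/* count even again, readers settle */
		preempt_enable();
	}

The payoff is that scoutfs_server_get_fs_roots() can run from any
request handler without taking locks that the commit path holds.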

View File

@@ -58,12 +58,15 @@ do { \
int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
struct scoutfs_net_lock *nl);
-int scoutfs_server_lock_response(struct super_block *sb, u64 rid,
-				 u64 id, struct scoutfs_net_lock *nl);
+int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
+				 struct scoutfs_net_lock_grant_response *gr);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
struct scoutfs_key *key);
int scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);
+void scoutfs_server_get_fs_roots(struct super_block *sb,
+				 struct scoutfs_btree_root *fs_root,
+				 struct scoutfs_btree_root *logs_root);
struct sockaddr_in;
struct scoutfs_quorum_elected_info;