Add network messaging between mounts

We're going to need communication between mounts to update and
distribute the manifest and allocators in the treap ring.

This adds a networking core where one mount becomes the server and other
mounts send requests to it.  The messaging semantics are pretty simple
in that clients reliably send requests and the server passively replies
to requests.  Complexity beyond that is up to the callers implementing
the requests.

It relies on locking to establish the server role and to broadcast the
address of the server socket.  We add a trivial lvb (lock value block)
back to our local test locking implementation to store the address.  We
also add the ability to shut down locking so that networking work that
is blocked on locks stops blocking and can be torn down.

A little demonstration request is included which just gives visibility
into client and server clocks in the trace logs.  Next up we'll add the
requests that do real work.

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown committed 2017-03-10 10:39:53 -08:00
parent 392ed81c43
commit 39ae89d85f
8 changed files with 1351 additions and 26 deletions


@@ -3,5 +3,5 @@ obj-$(CONFIG_SCOUTFS_FS) := scoutfs.o
CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
scoutfs-y += alloc.o bio.o compact.o counters.o data.o dir.o kvec.o inode.o \
-	     ioctl.o item.o key.o lock.o manifest.o msg.o seg.o \
+	     ioctl.o item.o key.o lock.o manifest.o msg.o net.o seg.o \
scoutfs_trace.o super.o trans.o treap.o xattr.o


@@ -172,7 +172,10 @@ struct scoutfs_segment_block {
#define SCOUTFS_EXTENT_KEY 9
#define SCOUTFS_ORPHAN_KEY 10
#define SCOUTFS_DATA_KEY 11
-#define SCOUTFS_MAX_UNUSED_KEY 255
+/* not found in the fs */
+#define SCOUTFS_MAX_UNUSED_KEY 253
+#define SCOUTFS_NET_ADDR_KEY 254
+#define SCOUTFS_NET_LISTEN_KEY 255
/* value is struct scoutfs_inode */
struct scoutfs_inode_key {
@@ -348,4 +351,42 @@ enum {
#define SCOUTFS_MAX_KEY_SIZE \
offsetof(struct scoutfs_link_backref_key, name[SCOUTFS_NAME_LEN + 1])
/*
* messages over the wire.
*/
/* XXX ipv6 */
struct scoutfs_inet_addr {
__le32 addr;
__le16 port;
} __packed;
/*
* This header precedes and describes all network messages sent over
* sockets. The id is set by the request and sent in the reply. The
* type is strictly redundant in the reply because the id will find the
* send but we include it in both packets to make it easier to observe
* replies without having the id from their previous request.
*/
struct scoutfs_net_header {
__le64 id;
__le16 data_len;
__u8 type;
__u8 status;
__u8 data[0];
};
enum {
/* sends and receives a struct scoutfs_timeval */
SCOUTFS_NET_TRADE_TIME = 0,
SCOUTFS_NET_UNKNOWN,
};
enum {
SCOUTFS_NET_STATUS_REQUEST = 0,
SCOUTFS_NET_STATUS_SUCCESS,
SCOUTFS_NET_STATUS_ERROR,
SCOUTFS_NET_STATUS_UNKNOWN,
};
#endif
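
To make the framing concrete, here is a minimal sketch of building a
SCOUTFS_NET_TRADE_TIME request with this header.  The fill helper and
the scoutfs_timeval layout (le64 seconds, le32 nanoseconds) are
assumptions for illustration; they are not part of this commit:

/* hypothetical helper, for illustration only */
static void fill_trade_time_request(struct scoutfs_net_header *nh, u64 id)
{
	struct timespec ts = current_kernel_time();
	struct scoutfs_timeval *tv = (void *)nh->data;

	nh->id = cpu_to_le64(id);		/* echoed back in the reply */
	nh->data_len = cpu_to_le16(sizeof(*tv));
	nh->type = SCOUTFS_NET_TRADE_TIME;
	nh->status = SCOUTFS_NET_STATUS_REQUEST;

	/* assumed scoutfs_timeval fields */
	tv->sec = cpu_to_le64(ts.tv_sec);
	tv->nsec = cpu_to_le32(ts.tv_nsec);
}

The reply would carry the same id with status set to
SCOUTFS_NET_STATUS_SUCCESS or SCOUTFS_NET_STATUS_ERROR, so a client can
match it to its pending request without parsing the payload.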


@@ -35,8 +35,13 @@ struct held_locks {
spinlock_t lock;
struct list_head list;
wait_queue_head_t waitq;
-};
+
+	/* super hacky fake lvb that only allows one specific key */
+	char fake_lvb[sizeof(struct scoutfs_inet_addr)];
+	struct scoutfs_key_buf fake_lvb_key;
+	char fake_lvb_key_data[SCOUTFS_MAX_KEY_SIZE];
+};
/*
* allocated per-super. Stored in the global list for finding supers
@@ -45,6 +50,7 @@ struct held_locks {
*/
struct lock_info {
struct super_block *sb;
bool shutdown;
struct held_locks *held;
struct list_head id_head;
struct list_head global_head;
@@ -65,13 +71,20 @@ static bool compatible_locks(struct scoutfs_lock *a, struct scoutfs_lock *b)
scoutfs_key_compare_ranges(a->start, a->end, b->start, b->end);
}
-static bool lock_added(struct held_locks *held, struct scoutfs_lock *add)
+/* also returns true if we're shutting down, caller tests after waiting */
+static bool lock_added(struct lock_info *linf, struct scoutfs_lock *add)
{
struct held_locks *held = linf->held;
struct scoutfs_lock *lck;
bool added = true;
spin_lock(&held->lock);
if (linf->shutdown) {
added = true;
goto out;
}
list_for_each_entry(lck, &held->list, head) {
if (!compatible_locks(lck, add)) {
added = false;
@@ -82,6 +95,7 @@ static bool lock_added(struct held_locks *held, struct scoutfs_lock *add)
if (added)
list_add(&add->head, &held->list);
out:
spin_unlock(&held->lock);
return added;
@@ -154,6 +168,74 @@ static void unlock(struct held_locks *held, struct scoutfs_lock *lck)
wake_up(&held->waitq);
}
static void assert_fake_lvb(struct held_locks *held,
struct scoutfs_key_buf *start,
struct scoutfs_key_buf *end, unsigned lvb_len)
{
BUG_ON(scoutfs_key_compare(start, end));
BUG_ON(lvb_len != sizeof(held->fake_lvb));
BUG_ON(held->fake_lvb_key.key_len &&
scoutfs_key_compare(&held->fake_lvb_key, start));
}
/*
* Acquire a coherent lock on the given range of keys. While the lock
* is held other lockers are serialized. Cache coherency is maintained
* by the locking infrastructure. Lock acquisition causes writeout from
* or invalidation of other caches.
*
* The caller provides the opaque lock structure used for storage and
* their start and end pointers will be accessed while the lock is held.
*/
int scoutfs_lock_range_lvb(struct super_block *sb, int mode,
struct scoutfs_key_buf *start,
struct scoutfs_key_buf *end,
void *caller_lvb, unsigned lvb_len,
struct scoutfs_lock *lck)
{
DECLARE_LOCK_INFO(sb, linf);
struct held_locks *held = linf->held;
int ret;
INIT_LIST_HEAD(&lck->head);
lck->sb = sb;
lck->start = start;
lck->end = end;
lck->mode = mode;
trace_scoutfs_lock_range(sb, lck);
ret = wait_event_interruptible(held->waitq, lock_added(linf, lck));
if (ret)
goto out;
if (linf->shutdown) {
/* unlocked, but we own it */
if (!list_empty(&lck->head))
unlock(held, lck);
ret = -ESHUTDOWN;
goto out;
}
ret = invalidate_others(sb, mode, start, end);
if (ret)
goto out;
if (caller_lvb) {
assert_fake_lvb(held, start, end, lvb_len);
if (mode == SCOUTFS_LOCK_MODE_WRITE) {
memcpy(held->fake_lvb, caller_lvb, lvb_len);
scoutfs_key_copy(&held->fake_lvb_key, start);
} else {
memcpy(caller_lvb, held->fake_lvb, lvb_len);
}
}
out:
return ret;
}
/*
* Acquire a coherent lock on the given range of keys. While the lock
* is held other lockers are serialized. Cache coherency is maintained
@@ -168,26 +250,7 @@ int scoutfs_lock_range(struct super_block *sb, int mode,
struct scoutfs_key_buf *end,
struct scoutfs_lock *lck)
{
-	DECLARE_LOCK_INFO(sb, linf);
-	struct held_locks *held = linf->held;
-	int ret;
-	INIT_LIST_HEAD(&lck->head);
-	lck->sb = sb;
-	lck->start = start;
-	lck->end = end;
-	lck->mode = mode;
-	trace_scoutfs_lock_range(sb, lck);
-	ret = wait_event_interruptible(held->waitq, lock_added(held, lck));
-	if (ret == 0) {
-		ret = invalidate_others(sb, mode, start, end);
-		if (ret)
-			unlock(held, lck);
-	}
-	return ret;
+	return scoutfs_lock_range_lvb(sb, mode, start, end, NULL, 0, lck);
}
void scoutfs_unlock_range(struct super_block *sb, struct scoutfs_lock *lck)
@@ -216,7 +279,7 @@ int scoutfs_lock_setup(struct super_block *sb)
if (!linf)
return -ENOMEM;
-	held = kmalloc(sizeof(struct held_locks), GFP_KERNEL);
+	held = kzalloc(sizeof(struct held_locks), GFP_KERNEL);
if (!held) {
kfree(linf);
return -ENOMEM;
@@ -225,8 +288,11 @@ int scoutfs_lock_setup(struct super_block *sb)
spin_lock_init(&held->lock);
INIT_LIST_HEAD(&held->list);
init_waitqueue_head(&held->waitq);
scoutfs_key_init_buf_len(&held->fake_lvb_key, &held->fake_lvb_key_data,
0, sizeof(held->fake_lvb_key_data));
linf->sb = sb;
linf->shutdown = false;
linf->held = held;
INIT_LIST_HEAD(&linf->id_head);
INIT_LIST_HEAD(&linf->global_head);
@@ -259,6 +325,24 @@ int scoutfs_lock_setup(struct super_block *sb)
return 0;
}
/*
* Cause all lock attempts from our super to fail, waking anyone who is
* currently blocked attempting to lock. Now that locks can't block we
* can easily tear down subsystems that use locking before freeing lock
* infrastructure.
*/
void scoutfs_lock_shutdown(struct super_block *sb)
{
DECLARE_LOCK_INFO(sb, linf);
struct held_locks *held = linf->held;
spin_lock(&held->lock);
linf->shutdown = true;
spin_unlock(&held->lock);
wake_up(&held->waitq);
}
void scoutfs_lock_destroy(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);


@@ -18,9 +18,15 @@ int scoutfs_lock_range(struct super_block *sb, int mode,
struct scoutfs_key_buf *start,
struct scoutfs_key_buf *end,
struct scoutfs_lock *lck);
int scoutfs_lock_range_lvb(struct super_block *sb, int mode,
struct scoutfs_key_buf *start,
struct scoutfs_key_buf *end,
void *caller_lvb, unsigned lvb_len,
struct scoutfs_lock *lck);
void scoutfs_unlock_range(struct super_block *sb, struct scoutfs_lock *lck);
int scoutfs_lock_setup(struct super_block *sb);
void scoutfs_lock_shutdown(struct super_block *sb);
void scoutfs_lock_destroy(struct super_block *sb);
#endif
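
As a usage example, a client could fetch the server's published address
by passing a caller lvb to a read-mode lock on the single address key.
A minimal sketch, assuming SCOUTFS_LOCK_MODE_READ exists and that the
caller has initialized the key; note that the fake lvb only supports
one specific key, so start and end are the same:

static int read_server_addr(struct super_block *sb,
			    struct scoutfs_key_buf *addr_key,
			    struct scoutfs_inet_addr *addr)
{
	struct scoutfs_lock lck;
	int ret;

	ret = scoutfs_lock_range_lvb(sb, SCOUTFS_LOCK_MODE_READ,
				     addr_key, addr_key,
				     addr, sizeof(*addr), &lck);
	if (ret == 0)
		scoutfs_unlock_range(sb, &lck);

	return ret;	/* -ESHUTDOWN after scoutfs_lock_shutdown() */
}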

kmod/src/net.c (new file, 1177 lines)

File diff suppressed because it is too large.
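
Since the net.c diff is suppressed, here is a rough sketch of how the
server role might be established with the lvb plumbing above.  The
helpers and the exact flow are assumptions for illustration, not the
committed code:

/* whoever wins the exclusive lock on the listen key becomes the server
 * and publishes the address of its bound socket through the lvb */
static int become_server(struct super_block *sb,
			 struct scoutfs_key_buf *listen_key,
			 struct scoutfs_key_buf *addr_key,
			 struct scoutfs_inet_addr *bound)
{
	struct scoutfs_lock listen_lck;
	struct scoutfs_lock addr_lck;
	int ret;

	/* only one mount at a time holds the listen lock */
	ret = scoutfs_lock_range(sb, SCOUTFS_LOCK_MODE_WRITE,
				 listen_key, listen_key, &listen_lck);
	if (ret)
		return ret;

	/* ... bind and listen on a socket, filling *bound ... */

	/* publish the bound address in the lvb for clients to read */
	ret = scoutfs_lock_range_lvb(sb, SCOUTFS_LOCK_MODE_WRITE,
				     addr_key, addr_key,
				     bound, sizeof(*bound), &addr_lck);
	if (ret == 0)
		scoutfs_unlock_range(sb, &addr_lck);

	/* ... accept connections and answer requests; the listen lock is
	 * held for the life of the server ... */

	return ret;
}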

kmod/src/net.h (new file, 9 lines)

@@ -0,0 +1,9 @@
#ifndef _SCOUTFS_NET_H_
#define _SCOUTFS_NET_H_
int scoutfs_net_trade_time(struct super_block *sb);
int scoutfs_net_setup(struct super_block *sb);
void scoutfs_net_destroy(struct super_block *sb);
#endif


@@ -36,6 +36,7 @@
#include "compact.h"
#include "data.h"
#include "lock.h"
#include "net.h"
#include "scoutfs_trace.h"
static struct kset *scoutfs_kset;
@@ -221,10 +222,13 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
// scoutfs_buddy_setup(sb) ?:
scoutfs_compact_setup(sb) ?:
scoutfs_setup_trans(sb) ?:
-		scoutfs_lock_setup(sb);
+		scoutfs_lock_setup(sb) ?:
+		scoutfs_net_setup(sb);
if (ret)
return ret;
scoutfs_net_trade_time(sb);
scoutfs_advance_dirty_super(sb);
inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO);
@@ -252,6 +256,8 @@ static void scoutfs_kill_sb(struct super_block *sb)
kill_block_super(sb);
if (sbi) {
scoutfs_lock_shutdown(sb);
scoutfs_net_destroy(sb);
scoutfs_lock_destroy(sb);
scoutfs_compact_destroy(sb);
scoutfs_shutdown_trans(sb);


@@ -14,6 +14,7 @@ struct treap_info;
struct compact_info;
struct data_info;
struct lock_info;
struct net_info;
struct scoutfs_sb_info {
struct super_block *sb;
@@ -42,6 +43,7 @@ struct scoutfs_sb_info {
struct workqueue_struct *trans_write_workq;
struct lock_info *lock_info;
struct net_info *net_info;
/* $sysfs/fs/scoutfs/$id/ */
struct kset *kset;