mirror of https://github.com/versity/scoutfs.git (synced 2026-01-10 13:47:27 +00:00)
Merge pull request #18 from versity/zab/quorum_slots_unmount
Zab/quorum slots unmount
README.md | 33
@@ -65,8 +65,13 @@ The steps for getting scoutfs mounted and operational are:
 2. Make a new filesystem on the devices with the userspace utilities
 3. Mount the devices on all the nodes
 
-In this example we run all of these commands on three nodes. The names
-of the block devices are the same on all the nodes.
+In this example we use three nodes. The names of the block devices are
+the same on all the nodes. Two of the nodes will be quorum members. A
+majority of quorum members must be mounted to elect a leader to run a
+server that all the mounts connect to. It should be noted that two
+quorum members results in a majority of one, each member itself, so
+split brain elections are possible but so unlikely that it's fine for a
+demonstration.
 
 1. Get the Kernel Module and Userspace Binaries
@@ -88,24 +93,30 @@ of the block devices are the same on all the nodes.
 alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
 ```
 
-2. Make a New Filesystem (**destroys contents, no questions asked**)
+2. Make a New Filesystem (**destroys contents**)
 
-We specify that two of our three nodes must be present to form a
-quorum for the system to function.
+We specify quorum slots with the addresses of each of the quorum
+member nodes, the metadata device, and the data device.
 
 ```shell
-scoutfs mkfs -Q 2 /dev/meta_dev /dev/data_dev
+scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
 ```
 
 3. Mount the Filesystem
 
-Each mounting node provides its local IP address on which it will run
-an internal server for the other mounts if it is elected the leader by
-the quorum.
+First, mount each of the quorum nodes so that they can elect and
+start a server for the remaining node to connect to. The slot numbers
+were specified with the leading "0,..." and "1,..." in the mkfs options
+above.
 
 ```shell
 mkdir /mnt/scoutfs
-mount -t scoutfs -o server_addr=$NODE_ADDR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
+mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
 ```
 
 Then mount the remaining node which can now connect to the running server.
 
 ```shell
 mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
 ```
 
 4. For Kicks, Observe the Metadata Change Index
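The two-member caveat above follows from how the majority is counted: below three quorum members a single vote decides, which is why a two member config can in theory split brain. This is the same rule the t_majority_count() helper added to the test harness later in this diff encodes. A small standalone illustration (mine, not part of the diff):

```c
/*
 * Sketch (not from the diff): the majority rule implied by the README
 * and by the t_majority_count() test helper below.  With one or two
 * quorum members a single vote wins; from three members up a true
 * majority is required.
 */
#include <stdio.h>

static int majority_needed(int quorum_members)
{
	if (quorum_members < 3)
		return 1;
	return quorum_members / 2 + 1;
}

int main(void)
{
	int n;

	for (n = 1; n <= 5; n++)
		printf("%d member(s): majority of %d\n",
		       n, majority_needed(n));
	return 0;	/* prints 1, 1, 2, 3, 3 */
}
```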
@@ -34,13 +34,10 @@
 
 /*
  * The client is responsible for maintaining a connection to the server.
- * This includes managing quorum elections that determine which client
- * should run the server that all the clients connect to.
  */
 
 #define CLIENT_CONNECT_DELAY_MS (MSEC_PER_SEC / 10)
 #define CLIENT_CONNECT_TIMEOUT_MS (1 * MSEC_PER_SEC)
-#define CLIENT_QUORUM_TIMEOUT_MS (5 * MSEC_PER_SEC)
 
 struct client_info {
	struct super_block *sb;
@@ -292,52 +289,30 @@ static int client_greeting(struct super_block *sb,
	scoutfs_net_client_greeting(sb, conn, new_server);
 
	client->server_term = le64_to_cpu(gr->server_term);
-	client->greeting_umb = le64_to_cpu(gr->unmount_barrier);
	ret = 0;
 out:
	return ret;
 }
 
 /*
- * This work is responsible for maintaining a connection from the client
- * to the server.  It's queued on mount and disconnect and we requeue
- * the work if the work fails and we're not shutting down.
+ * The client is deciding if it needs to keep trying to reconnect to
+ * have its farewell request processed.  The server removes our mounted
+ * client item last so that if we don't see it we know the server has
+ * processed our farewell and we don't need to reconnect, we can unmount
+ * safely.
  *
- * In the typical case a mount reads the super blocks and finds the
- * address of the currently running server and connects to it.
- * Non-voting clients who can't connect will keep trying alternating
- * reading the address and getting connect timeouts.
- *
- * Voting mounts will try to elect a leader if they can't connect to the
- * server.  When a quorum can't connect and are able to elect a leader
- * then a new server is started.  The new server will write its address
- * in the super and everyone will be able to connect.
- *
- * There's a tricky bit of coordination required to safely unmount.
- * Clients need to tell the server that they won't be coming back with a
- * farewell request.  Once a client receives its farewell response it
- * can exit.  But a majority of clients need to stick around to elect a
- * server to process all their farewell requests.  This is coordinated
- * by having the greeting tell the server that a client is a voter.  The
- * server then holds on to farewell requests from voters until only
- * requests from the final quorum remain.  These farewell responses are
- * only sent after updating an unmount barrier in the super to indicate
- * to the final quorum that they can safely exit without having received
- * a farewell response over the network.
+ * This is peeking at btree blocks that the server could be actively
+ * freeing with cow updates so it can see stale blocks, we just return
+ * the error and we'll retry eventually as the connection times out.
  */
-static void scoutfs_client_connect_worker(struct work_struct *work)
+static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
 {
-	struct client_info *client = container_of(work, struct client_info,
-						  connect_dwork.work);
-	struct super_block *sb = client->sb;
-	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
-	struct mount_options *opts = &sbi->opts;
-	const bool am_voter = opts->server_addr.sin_addr.s_addr != 0;
-	struct scoutfs_net_greeting greet;
-	struct sockaddr_in sin;
-	ktime_t timeout_abs;
-	u64 elected_term;
-	struct scoutfs_super_block *super;
+	struct scoutfs_super_block *super = NULL;
+	struct scoutfs_key key = {
+		.sk_zone = SCOUTFS_MOUNTED_CLIENT_ZONE,
+		.skmc_rid = cpu_to_le64(rid),
+	};
+	SCOUTFS_BTREE_ITEM_REF(iref);
	int ret;
 
	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -350,57 +325,77 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
	if (ret)
		goto out;
 
-	/* can safely unmount if we see that server processed our farewell */
-	if (am_voter && client->sending_farewell &&
-	    (le64_to_cpu(super->unmount_barrier) > client->greeting_umb)) {
+	ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key, &iref);
+	if (ret == 0) {
+		scoutfs_btree_put_iref(&iref);
+		ret = 1;
+	}
+	if (ret == -ENOENT)
+		ret = 0;
+
+	kfree(super);
+out:
+	return ret;
+}
+
+/*
+ * This work is responsible for maintaining a connection from the client
+ * to the server.  It's queued on mount and disconnect and we requeue
+ * the work if the work fails and we're not shutting down.
+ *
+ * We ask quorum for an address to try and connect to.  If there isn't
+ * one, or it fails, we back off a bit before trying again.
+ *
+ * There's a tricky bit of coordination required to safely unmount.
+ * Clients need to tell the server that they won't be coming back with a
+ * farewell request.  Once the server processes a farewell request from
+ * the client it can forget the client.  If the connection is broken
+ * before the client gets the farewell response it doesn't want to
+ * reconnect to send it again; instead the client can read the metadata
+ * device to check for the lack of an item which indicates that the
+ * server has processed its farewell.
+ */
+static void scoutfs_client_connect_worker(struct work_struct *work)
+{
+	struct client_info *client = container_of(work, struct client_info,
+						  connect_dwork.work);
+	struct super_block *sb = client->sb;
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+	struct scoutfs_super_block *super = &sbi->super;
+	struct mount_options *opts = &sbi->opts;
+	const bool am_quorum = opts->quorum_slot_nr >= 0;
+	struct scoutfs_net_greeting greet;
+	struct sockaddr_in sin;
+	int ret;
+
+	/* can unmount once server farewell handling removes our item */
+	if (client->sending_farewell &&
+	    lookup_mounted_client_item(sb, sbi->rid) == 0) {
+		client->farewell_error = 0;
+		complete(&client->farewell_comp);
+		ret = 0;
+		goto out;
+	}
 
-	/* try to connect to the super's server address */
-	scoutfs_addr_to_sin(&sin, &super->server_addr);
-	if (sin.sin_addr.s_addr != 0 && sin.sin_port != 0)
-		ret = scoutfs_net_connect(sb, client->conn, &sin,
-					  CLIENT_CONNECT_TIMEOUT_MS);
-	else
-		ret = -ENOTCONN;
-
-	/* voters try to elect a leader if they couldn't connect */
-	if (ret < 0) {
-		/* non-voters will keep retrying */
-		if (!am_voter)
-			goto out;
-
-		/* make sure local server isn't writing super during votes */
-		scoutfs_server_stop(sb);
-
-		timeout_abs = ktime_add_ms(ktime_get(),
-					   CLIENT_QUORUM_TIMEOUT_MS);
-
-		ret = scoutfs_quorum_election(sb, timeout_abs,
-					      le64_to_cpu(super->quorum_server_term),
-					      &elected_term);
-		/* start the server if we were asked to */
-		if (elected_term > 0)
-			ret = scoutfs_server_start(sb, &opts->server_addr,
-						   elected_term);
-		ret = -ENOTCONN;
-	}
+	ret = scoutfs_quorum_server_sin(sb, &sin);
+	if (ret < 0)
+		goto out;
+
+	ret = scoutfs_net_connect(sb, client->conn, &sin,
+				  CLIENT_CONNECT_TIMEOUT_MS);
+	if (ret < 0)
+		goto out;
 
	/* send a greeting to verify endpoints of each connection */
	greet.fsid = super->hdr.fsid;
	greet.version = super->version;
	greet.server_term = cpu_to_le64(client->server_term);
-	greet.unmount_barrier = cpu_to_le64(client->greeting_umb);
	greet.rid = cpu_to_le64(sbi->rid);
	greet.flags = 0;
	if (client->sending_farewell)
		greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_FAREWELL);
-	if (am_voter)
-		greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_VOTER);
+	if (am_quorum)
+		greet.flags |= cpu_to_le64(SCOUTFS_NET_GREETING_FLAG_QUORUM);
 
	ret = scoutfs_net_submit_request(sb, client->conn,
					 SCOUTFS_NET_CMD_GREETING,
@@ -409,7 +404,6 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
	if (ret)
		scoutfs_net_shutdown(sb, client->conn);
 out:
-	kfree(super);
 
	/* always have a small delay before retrying to avoid storms */
	if (ret && !atomic_read(&client->shutting_down))
@@ -139,18 +139,21 @@
	EXPAND_COUNTER(net_recv_invalid_message)	\
	EXPAND_COUNTER(net_recv_messages)		\
	EXPAND_COUNTER(net_unknown_request)		\
-	EXPAND_COUNTER(quorum_cycle)			\
-	EXPAND_COUNTER(quorum_elected_leader)		\
-	EXPAND_COUNTER(quorum_election_timeout)		\
-	EXPAND_COUNTER(quorum_failure)			\
-	EXPAND_COUNTER(quorum_read_block)		\
-	EXPAND_COUNTER(quorum_read_block_error)		\
-	EXPAND_COUNTER(quorum_read_invalid_block)	\
-	EXPAND_COUNTER(quorum_saw_super_leader)		\
-	EXPAND_COUNTER(quorum_timedout)			\
-	EXPAND_COUNTER(quorum_write_block)		\
-	EXPAND_COUNTER(quorum_write_block_error)	\
+	EXPAND_COUNTER(quorum_elected)			\
+	EXPAND_COUNTER(quorum_fence_error)		\
+	EXPAND_COUNTER(quorum_fence_leader)		\
+	EXPAND_COUNTER(quorum_fenced)			\
+	EXPAND_COUNTER(quorum_recv_error)		\
+	EXPAND_COUNTER(quorum_recv_heartbeat)		\
+	EXPAND_COUNTER(quorum_recv_invalid)		\
+	EXPAND_COUNTER(quorum_recv_resignation)		\
+	EXPAND_COUNTER(quorum_recv_vote)		\
+	EXPAND_COUNTER(quorum_send_heartbeat)		\
+	EXPAND_COUNTER(quorum_send_request)		\
+	EXPAND_COUNTER(quorum_send_resignation)		\
+	EXPAND_COUNTER(quorum_send_vote)		\
+	EXPAND_COUNTER(quorum_server_shutdown)		\
+	EXPAND_COUNTER(quorum_term_follower)		\
	EXPAND_COUNTER(server_commit_hold)		\
	EXPAND_COUNTER(server_commit_queue)		\
	EXPAND_COUNTER(server_commit_worker)		\
@@ -14,6 +14,7 @@
 #define SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK	0x897e4a7d
 #define SCOUTFS_BLOCK_MAGIC_SRCH_PARENT	0xb23a2a05
 #define SCOUTFS_BLOCK_MAGIC_ALLOC_LIST	0x8a93ac83
+#define SCOUTFS_BLOCK_MAGIC_QUORUM	0xbc310868
 
 /*
  * The super block, quorum block, and file data allocation granularity
@@ -54,15 +55,19 @@
 #define SCOUTFS_SUPER_BLKNO ((64ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
 
 /*
- * A reasonably large region of aligned quorum blocks follow the super
- * block.  Each voting cycle reads the entire region so we don't want it
- * to be too enormous.  256K seems like a reasonably chunky single IO.
- * The number of blocks in the region also determines the number of
- * mounts that have a reasonable probability of not overwriting each
- * other's random block locations.
+ * A small number of quorum blocks follow the super block, enough of
+ * them to match the starting offset of the super block so the region is
+ * aligned to the power of two that contains it.
  */
-#define SCOUTFS_QUORUM_BLKNO ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
-#define SCOUTFS_QUORUM_BLOCKS ((256ULL * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
+#define SCOUTFS_QUORUM_BLKNO (SCOUTFS_SUPER_BLKNO + 1)
+#define SCOUTFS_QUORUM_BLOCKS (SCOUTFS_SUPER_BLKNO - 1)
+
+/*
+ * Free metadata blocks start after the quorum blocks
+ */
+#define SCOUTFS_META_DEV_START_BLKNO \
+	((SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >> \
+	 SCOUTFS_BLOCK_SM_LG_SHIFT)
 
 /*
  * Start data on the data device aligned as well.
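For orientation, the new layout macros work out to small fixed block numbers. Assuming SCOUTFS_BLOCK_SM_SHIFT is 12 (the 4k small blocks the old comment mentions), the super block sits at small block 16 (64KB), and the quorum region is the 15 blocks behind it, padding the region out to a power-of-two boundary; one block per possible slot also lines up with SCOUTFS_QUORUM_MAX_SLOTS defined further down. A quick check (mine, not from the diff):

```c
/*
 * Sketch (not from the diff): the new on-disk layout arithmetic,
 * assuming SCOUTFS_BLOCK_SM_SHIFT is 12 (4KB small blocks).
 */
#include <stdio.h>

#define BLOCK_SM_SHIFT	12	/* assumed: 4KB small blocks */
#define SUPER_BLKNO	((64ULL * 1024) >> BLOCK_SM_SHIFT)
#define QUORUM_BLKNO	(SUPER_BLKNO + 1)
#define QUORUM_BLOCKS	(SUPER_BLKNO - 1)

int main(void)
{
	printf("super blkno   %llu\n", SUPER_BLKNO);	/* 16 */
	printf("quorum blkno  %llu\n", QUORUM_BLKNO);	/* 17 */
	printf("quorum blocks %llu\n", QUORUM_BLOCKS);	/* 15 */
	return 0;
}
```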
@@ -306,7 +311,7 @@ struct scoutfs_mounted_client_btree_val {
	__u8 flags;
 };
 
-#define SCOUTFS_MOUNTED_CLIENT_VOTER	(1 << 0)
+#define SCOUTFS_MOUNTED_CLIENT_QUORUM	(1 << 0)
 
 /*
  * srch files are a contiguous run of blocks with compressed entries
@@ -537,49 +542,77 @@ struct scoutfs_xattr {
 
 #define SCOUTFS_UUID_BYTES 16
 
-/*
- * Mounts read all the quorum blocks and write to one random quorum
- * block during a cycle.  The min cycle time limits the per-mount iop
- * load during elections.  The random cycle delay makes it less likely
- * that mounts will read and write at the same time and miss each
- * other's writes.  An election only completes if a quorum of mounts
- * vote for a leader before any of their elections timeout.  This is
- * made less likely by the probability that mounts will overwrite each
- * others random block locations.  The max quorum count limits that
- * probability.  9 mounts only have a 55% chance of writing to unique 4k
- * blocks in a 256k region.  The election timeout is set to include
- * enough cycles to usually complete the election.  Once a leader is
- * elected it spends a number of cycles writing out blocks with itself
- * logged as a leader.  This reduces the possibility that servers
- * will have their log entries overwritten and not be fenced.
- */
-#define SCOUTFS_QUORUM_MAX_COUNT		9
-#define SCOUTFS_QUORUM_CYCLE_LO_MS		10
-#define SCOUTFS_QUORUM_CYCLE_HI_MS		20
-#define SCOUTFS_QUORUM_TERM_LO_MS		250
-#define SCOUTFS_QUORUM_TERM_HI_MS		500
-#define SCOUTFS_QUORUM_ELECTED_LOG_CYCLES	10
+#define SCOUTFS_QUORUM_MAX_SLOTS	15
 
-struct scoutfs_quorum_block {
-	__le64 blkno;
-	__le64 term;
-	__le64 write_nr;
-	__le64 voter_rid;
-	__le64 vote_for_rid;
-	__le32 crc;
-	__u8 log_nr;
-	__u8 __pad[3];
-	struct scoutfs_quorum_log {
-		__le64 term;
-		__le64 rid;
-		struct scoutfs_inet_addr addr;
-	} log[0];
-};
-
-#define SCOUTFS_QUORUM_LOG_MAX					\
-	((SCOUTFS_BLOCK_SM_SIZE - sizeof(struct scoutfs_quorum_block)) / \
-		sizeof(struct scoutfs_quorum_log))
+/*
+ * To elect a leader, members race to have their variable election
+ * timeouts expire.  If they're first to send a vote request with a
+ * greater term to a majority of waiting members they'll be elected with
+ * a majority.  If the timeouts are too close, the vote may be split and
+ * everyone will wait for another cycle of variable timeouts to expire.
+ *
+ * These determine how long it will take to elect a leader once there's
+ * no evidence of a server (no leader quorum blocks on mount; heartbeat
+ * timeout expired.)
+ */
+#define SCOUTFS_QUORUM_ELECT_MIN_MS	250
+#define SCOUTFS_QUORUM_ELECT_VAR_MS	100
+
+/*
+ * Once a leader is elected they send out heartbeats at regular
+ * intervals to force members to wait the much longer heartbeat timeout.
+ * Once the heartbeat timeout expires without receiving a heartbeat
+ * they'll switch over to performing elections.
+ *
+ * These determine how long it could take members to notice that a
+ * leader has gone silent and start to elect a new leader.
+ */
+#define SCOUTFS_QUORUM_HB_IVAL_MS	100
+#define SCOUTFS_QUORUM_HB_TIMEO_MS	(5 * MSEC_PER_SEC)
+
+struct scoutfs_quorum_message {
+	__le64 fsid;
+	__le64 version;
+	__le64 term;
+	__u8 type;
+	__u8 from;
+	__u8 __pad[2];
+	__le32 crc;
+};
+
+/* a candidate requests a vote */
+#define SCOUTFS_QUORUM_MSG_REQUEST_VOTE	0
+/* followers send votes to candidates */
+#define SCOUTFS_QUORUM_MSG_VOTE		1
+/* elected leaders broadcast heartbeats to delay elections */
+#define SCOUTFS_QUORUM_MSG_HEARTBEAT	2
+/* leaders broadcast as they leave to break heartbeat timeout */
+#define SCOUTFS_QUORUM_MSG_RESIGNATION	3
+#define SCOUTFS_QUORUM_MSG_INVALID	4
+
+/*
+ * The version is currently always 0, but will be used by mounts to
+ * discover that membership has changed.
+ */
+struct scoutfs_quorum_config {
+	__le64 version;
+	struct scoutfs_quorum_slot {
+		struct scoutfs_inet_addr addr;
+	} slots[SCOUTFS_QUORUM_MAX_SLOTS];
+};
+
+struct scoutfs_quorum_block {
+	struct scoutfs_block_header hdr;
+	__le64 term;
+	__le64 random_write_mark;
+	__le64 flags;
+	struct scoutfs_quorum_block_event {
+		__le64 rid;
+		struct scoutfs_timespec ts;
+	} write, update_term, set_leader, clear_leader, fenced;
+};
+
+#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)
 
 #define SCOUTFS_FLAG_IS_META_BDEV 0x01
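The 55% figure in the removed comment is a birthday-problem calculation: 9 writers picking independent random blocks among the 64 4k blocks of the old 256k region collide surprisingly often. A quick verification of that number (mine, not from the diff):

```c
/*
 * Sketch (not from the diff): checking the removed comment's claim
 * that 9 mounts have only about a 55% chance of writing to unique 4k
 * blocks in the old 256k quorum region (64 block slots).
 */
#include <stdio.h>

int main(void)
{
	double p = 1.0;
	int i;

	for (i = 0; i < 9; i++)
		p *= (64.0 - i) / 64.0;	/* i-th mount must avoid i used blocks */

	printf("P(all 9 unique) = %.2f\n", p);	/* ~0.55 */
	return 0;
}
```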
@@ -597,12 +630,7 @@ struct scoutfs_super_block {
	__le64 total_data_blocks;
	__le64 first_data_blkno;
	__le64 last_data_blkno;
-	__le64 quorum_fenced_term;
-	__le64 quorum_server_term;
-	__le64 unmount_barrier;
-	__u8 quorum_count;
-	__u8 __pad[7];
-	struct scoutfs_inet_addr server_addr;
+	struct scoutfs_quorum_config qconf;
	struct scoutfs_alloc_root meta_alloc[2];
	struct scoutfs_alloc_root data_alloc;
	struct scoutfs_alloc_list_head server_meta_avail[2];
@@ -736,12 +764,6 @@ enum scoutfs_dentry_type {
 * the same server after receiving a greeting response and to a new
 * server after failover.
 *
- * @unmount_barrier: Incremented every time the remaining majority of
- * quorum members all agree to leave.  The server tells a quorum member
- * the value that it's connecting under so that if the client sees the
- * value increase in the super block then it knows that the server has
- * processed its farewell and can safely unmount.
- *
 * @rid: The client's random id that was generated once as the mount
 * started up.  This identifies a specific remote mount across
 * connections and servers.  It's set to the client's rid in both the
@@ -751,13 +773,12 @@ struct scoutfs_net_greeting {
	__le64 fsid;
	__le64 version;
	__le64 server_term;
-	__le64 unmount_barrier;
	__le64 rid;
	__le64 flags;
 };
 
 #define SCOUTFS_NET_GREETING_FLAG_FAREWELL	(1 << 0)
-#define SCOUTFS_NET_GREETING_FLAG_VOTER		(1 << 1)
+#define SCOUTFS_NET_GREETING_FLAG_QUORUM	(1 << 1)
 #define SCOUTFS_NET_GREETING_FLAG_INVALID	(~(__u64)0 << 2)
 
 /*
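A note on the greeting flags change above: QUORUM reuses VOTER's bit 1, so the INVALID mask (everything above the two defined bits) is unchanged. A minimal sketch of the packing and validation, using stand-in userspace names rather than the kernel's (not part of the diff):

```c
/*
 * Sketch (not from the diff): greeting flag packing and validation.
 * FLAG_* names mirror the SCOUTFS_NET_GREETING_FLAG_* defines above.
 */
#include <stdio.h>
#include <stdint.h>

#define FLAG_FAREWELL	(1 << 0)
#define FLAG_QUORUM	(1 << 1)	/* replaces the old VOTER bit */
#define FLAG_INVALID	(~(uint64_t)0 << 2)

int main(void)
{
	uint64_t flags = FLAG_FAREWELL | FLAG_QUORUM;

	/* any bit above the two defined ones makes the greeting invalid */
	printf("flags 0x%llx valid: %d\n",
	       (unsigned long long)flags, (flags & FLAG_INVALID) == 0);
	return 0;
}
```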
@@ -28,7 +28,7 @@
 #include "super.h"
 
 static const match_table_t tokens = {
-	{Opt_server_addr, "server_addr=%s"},
+	{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
	{Opt_metadev_path, "metadev_path=%s"},
	{Opt_err, NULL}
 };
@@ -43,46 +43,6 @@ u32 scoutfs_option_u32(struct super_block *sb, int token)
	return 0;
 }
 
-/* The caller's string is null terminted and can be clobbered */
-static int parse_ipv4(struct super_block *sb, char *str,
-		      struct sockaddr_in *sin)
-{
-	unsigned long port = 0;
-	__be32 addr;
-	char *c;
-	int ret;
-
-	/* null term port, if specified */
-	c = strchr(str, ':');
-	if (c)
-		*c = '\0';
-
-	/* parse addr */
-	addr = in_aton(str);
-	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
-	    ipv4_is_zeronet(addr) ||
-	    ipv4_is_local_multicast(addr)) {
-		scoutfs_err(sb, "invalid unicast ipv4 address: %s", str);
-		return -EINVAL;
-	}
-
-	/* parse port, if specified */
-	if (c) {
-		c++;
-		ret = kstrtoul(c, 0, &port);
-		if (ret != 0 || port == 0 || port >= U16_MAX) {
-			scoutfs_err(sb, "invalid port in ipv4 address: %s", c);
-			return -EINVAL;
-		}
-	}
-
-	sin->sin_family = AF_INET;
-	sin->sin_addr.s_addr = addr;
-	sin->sin_port = cpu_to_be16(port);
-
-	return 0;
-}
-
 static int parse_bdev_path(struct super_block *sb, substring_t *substr,
			   char **bdev_path_ret)
 {
@@ -132,14 +92,15 @@ out:
 int scoutfs_parse_options(struct super_block *sb, char *options,
			  struct mount_options *parsed)
 {
-	char ipstr[INET_ADDRSTRLEN + 1];
	substring_t args[MAX_OPT_ARGS];
+	int nr;
	int token;
	char *p;
	int ret;
 
+	/* Set defaults */
	memset(parsed, 0, sizeof(*parsed));
+	parsed->quorum_slot_nr = -1;
 
	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
|
||||
|
||||
token = match_token(p, tokens, args);
|
||||
switch (token) {
|
||||
case Opt_server_addr:
|
||||
case Opt_quorum_slot_nr:
|
||||
|
||||
match_strlcpy(ipstr, args, ARRAY_SIZE(ipstr));
|
||||
ret = parse_ipv4(sb, ipstr, &parsed->server_addr);
|
||||
if (ret < 0)
|
||||
if (parsed->quorum_slot_nr != -1) {
|
||||
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 ||
|
||||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
|
||||
SCOUTFS_QUORUM_MAX_SLOTS - 1);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
parsed->quorum_slot_nr = nr;
|
||||
break;
|
||||
case Opt_metadev_path:
|
||||
|
||||
|
||||
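The new parsing above enforces three rules: the option may appear at most once, the slot must be in range, and the parsed default of -1 marks a mount that isn't a quorum member. A small userspace sketch of those rules (names are mine, not the kernel's):

```c
/*
 * Sketch (not from the diff): the accept/reject rules the new
 * quorum_slot_nr parsing enforces.  -1 is the parsed default,
 * meaning "not a quorum member"; valid slots are 0..14.
 */
#include <stdio.h>

#define QUORUM_MAX_SLOTS 15

/* returns 0 on success, -1 on a bad or duplicate option */
static int set_quorum_slot_nr(int *parsed_nr, int nr)
{
	if (*parsed_nr != -1)			/* only one option allowed */
		return -1;
	if (nr < 0 || nr >= QUORUM_MAX_SLOTS)	/* range check */
		return -1;
	*parsed_nr = nr;
	return 0;
}

int main(void)
{
	int slot = -1;	/* default: not a quorum member */

	printf("%d\n", set_quorum_slot_nr(&slot, 3));	/* 0: ok */
	printf("%d\n", set_quorum_slot_nr(&slot, 4));	/* -1: duplicate */
	printf("slot %d\n", slot);			/* slot 3 */
	return 0;
}
```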
@@ -6,13 +6,13 @@
 #include "format.h"
 
 enum scoutfs_mount_options {
-	Opt_server_addr,
+	Opt_quorum_slot_nr,
	Opt_metadev_path,
	Opt_err,
 };
 
 struct mount_options {
-	struct sockaddr_in server_addr;
+	int quorum_slot_nr;
	char *metadev_path;
 };

kmod/src/quorum.c | 1599 (file diff suppressed because it is too large)
@@ -1,10 +1,15 @@
 #ifndef _SCOUTFS_QUORUM_H_
 #define _SCOUTFS_QUORUM_H_
 
-int scoutfs_quorum_election(struct super_block *sb, ktime_t timeout_abs,
-			    u64 prev_term, u64 *elected_term);
-void scoutfs_quorum_clear_leader(struct super_block *sb);
+int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
+void scoutfs_quorum_server_shutdown(struct super_block *sb);
+
+u8 scoutfs_quorum_votes_needed(struct super_block *sb);
+void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
+			     struct sockaddr_in *sin);
 
 int scoutfs_quorum_setup(struct super_block *sb);
+void scoutfs_quorum_shutdown(struct super_block *sb);
 void scoutfs_quorum_destroy(struct super_block *sb);
 
 #endif
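A rough picture of what the slot-based API replaces: instead of electing through random block writes, each quorum member owns one fixed slot in the super block's quorum config, and helpers like scoutfs_quorum_slot_sin() translate a slot number into an address. The sketch below mirrors scoutfs_quorum_config with stand-in userspace types; treating an all-zero address as an unused slot and using host byte order are my assumptions, not something this diff states:

```c
/*
 * Sketch (not from the diff): iterating a fixed quorum slot table.
 * The struct shapes loosely mirror scoutfs_quorum_config above; the
 * "zero address means unused" rule is assumed for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define QUORUM_MAX_SLOTS 15

struct inet_addr { uint32_t addr; uint16_t port; };	/* host order here */
struct quorum_config {
	uint64_t version;
	struct inet_addr slots[QUORUM_MAX_SLOTS];
};

static void print_slots(const struct quorum_config *qconf)
{
	int i;

	for (i = 0; i < QUORUM_MAX_SLOTS; i++) {
		if (qconf->slots[i].addr == 0)	/* assumed: zero = unused */
			continue;
		printf("slot %d: %u.%u.%u.%u:%u\n", i,
		       qconf->slots[i].addr >> 24,
		       (qconf->slots[i].addr >> 16) & 0xff,
		       (qconf->slots[i].addr >> 8) & 0xff,
		       qconf->slots[i].addr & 0xff,
		       (unsigned)qconf->slots[i].port);
	}
}

int main(void)
{
	struct quorum_config qconf = { .slots[0] = { 0x7f000001, 12345 } };

	print_slots(&qconf);	/* slot 0: 127.0.0.1:12345 */
	return 0;
}
```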
@@ -1797,118 +1797,69 @@ TRACE_EVENT(scoutfs_lock_message,
		  __entry->old_mode, __entry->new_mode)
 );
 
+DECLARE_EVENT_CLASS(scoutfs_quorum_message_class,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
+
+	TP_ARGS(sb, term, type, nr),
+
+	TP_STRUCT__entry(
+		SCSB_TRACE_FIELDS
+		__field(__u64, term)
+		__field(__u8, type)
+		__field(int, nr)
+	),
+
+	TP_fast_assign(
+		SCSB_TRACE_ASSIGN(sb);
+		__entry->term = term;
+		__entry->type = type;
+		__entry->nr = nr;
+	),
+
+	TP_printk(SCSBF" term %llu type %u nr %d",
+		  SCSB_TRACE_ARGS, __entry->term, __entry->type, __entry->nr)
+);
+DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_send_message,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
+	TP_ARGS(sb, term, type, nr)
+);
+DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_recv_message,
+	TP_PROTO(struct super_block *sb, u64 term, u8 type, int nr),
+	TP_ARGS(sb, term, type, nr)
+);
 
-TRACE_EVENT(scoutfs_quorum_election,
-	TP_PROTO(struct super_block *sb, u64 prev_term),
-
-	TP_ARGS(sb, prev_term),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(__u64, prev_term)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->prev_term = prev_term;
-	),
-
-	TP_printk(SCSBF" prev_term %llu",
-		  SCSB_TRACE_ARGS, __entry->prev_term)
-);
-
-TRACE_EVENT(scoutfs_quorum_election_ret,
-	TP_PROTO(struct super_block *sb, int ret, u64 elected_term),
-
-	TP_ARGS(sb, ret, elected_term),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, ret)
-		__field(__u64, elected_term)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->ret = ret;
-		__entry->elected_term = elected_term;
-	),
-
-	TP_printk(SCSBF" ret %d elected_term %llu",
-		  SCSB_TRACE_ARGS, __entry->ret, __entry->elected_term)
-);
-
-TRACE_EVENT(scoutfs_quorum_election_vote,
-	TP_PROTO(struct super_block *sb, int role, u64 term, u64 vote_for_rid,
-		 int votes, int log_cycles, int quorum_count),
-
-	TP_ARGS(sb, role, term, vote_for_rid, votes, log_cycles, quorum_count),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(int, role)
-		__field(__u64, term)
-		__field(__u64, vote_for_rid)
-		__field(int, votes)
-		__field(int, log_cycles)
-		__field(int, quorum_count)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->role = role;
-		__entry->term = term;
-		__entry->vote_for_rid = vote_for_rid;
-		__entry->votes = votes;
-		__entry->log_cycles = log_cycles;
-		__entry->quorum_count = quorum_count;
-	),
-
-	TP_printk(SCSBF" role %d term %llu vote_for_rid %016llx votes %d log_cycles %d quorum_count %d",
-		  SCSB_TRACE_ARGS, __entry->role, __entry->term,
-		  __entry->vote_for_rid, __entry->votes, __entry->log_cycles,
-		  __entry->quorum_count)
-);
-
-DECLARE_EVENT_CLASS(scoutfs_quorum_block_class,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
-
-	TP_ARGS(sb, blk),
-
-	TP_STRUCT__entry(
-		SCSB_TRACE_FIELDS
-		__field(__u64, blkno)
-		__field(__u64, term)
-		__field(__u64, write_nr)
-		__field(__u64, voter_rid)
-		__field(__u64, vote_for_rid)
-		__field(__u32, crc)
-		__field(__u8, log_nr)
-	),
-
-	TP_fast_assign(
-		SCSB_TRACE_ASSIGN(sb);
-		__entry->blkno = le64_to_cpu(blk->blkno);
-		__entry->term = le64_to_cpu(blk->term);
-		__entry->write_nr = le64_to_cpu(blk->write_nr);
-		__entry->voter_rid = le64_to_cpu(blk->voter_rid);
-		__entry->vote_for_rid = le64_to_cpu(blk->vote_for_rid);
-		__entry->crc = le32_to_cpu(blk->crc);
-		__entry->log_nr = blk->log_nr;
-	),
-
-	TP_printk(SCSBF" blkno %llu term %llu write_nr %llu voter_rid %016llx vote_for_rid %016llx crc 0x%08x log_nr %u",
-		  SCSB_TRACE_ARGS, __entry->blkno, __entry->term,
-		  __entry->write_nr, __entry->voter_rid, __entry->vote_for_rid,
-		  __entry->crc, __entry->log_nr)
-);
-DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_read_block,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
-	TP_ARGS(sb, blk)
-);
-DEFINE_EVENT(scoutfs_quorum_block_class, scoutfs_quorum_write_block,
-	TP_PROTO(struct super_block *sb, struct scoutfs_quorum_block *blk),
-	TP_ARGS(sb, blk)
-);
+TRACE_EVENT(scoutfs_quorum_loop,
+	TP_PROTO(struct super_block *sb, int role, u64 term, int vote_for,
+		 unsigned long vote_bits, struct timespec64 timeout),
+
+	TP_ARGS(sb, role, term, vote_for, vote_bits, timeout),
+
+	TP_STRUCT__entry(
+		SCSB_TRACE_FIELDS
+		__field(int, role)
+		__field(__u64, term)
+		__field(int, vote_for)
+		__field(unsigned long, vote_bits)
+		__field(unsigned long, vote_count)
+		__field(unsigned long long, timeout_sec)
+		__field(int, timeout_nsec)
+	),
+
+	TP_fast_assign(
+		SCSB_TRACE_ASSIGN(sb);
+		__entry->role = role;
+		__entry->term = term;
+		__entry->vote_for = vote_for;
+		__entry->vote_bits = vote_bits;
+		__entry->vote_count = hweight_long(vote_bits);
+		__entry->timeout_sec = timeout.tv_sec;
+		__entry->timeout_nsec = timeout.tv_nsec;
+	),
+
+	TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu.%u",
+		  SCSB_TRACE_ARGS, __entry->term, __entry->role,
+		  __entry->vote_for, __entry->vote_bits, __entry->vote_count,
+		  __entry->timeout_sec, __entry->timeout_nsec)
+);
 
 /*
@@ -59,7 +59,6 @@ struct server_info {
	int err;
	bool shutting_down;
	struct completion start_comp;
-	struct sockaddr_in listen_sin;
	u64 term;
	struct scoutfs_net_connection *conn;
 
@@ -75,7 +74,7 @@ struct server_info {
	unsigned long nr_clients;
 
	/* track clients waiting in unmount for farewell response */
-	struct mutex farewell_mutex;
+	spinlock_t farewell_lock;
	struct list_head farewell_requests;
	struct work_struct farewell_work;
 
@@ -92,6 +91,7 @@ struct server_info {
 
	struct mutex logs_mutex;
	struct mutex srch_mutex;
+	struct mutex mounted_clients_mutex;
 
	/* stable versions stored from commits, given in locks and rpcs */
	seqcount_t roots_seqcount;
@@ -1031,15 +1031,20 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_mounted_client_btree_val mcv;
	struct scoutfs_key key;
+	int ret;
 
	init_mounted_client_key(&key, rid);
	mcv.flags = 0;
-	if (gr_flags & SCOUTFS_NET_GREETING_FLAG_VOTER)
-		mcv.flags |= SCOUTFS_MOUNTED_CLIENT_VOTER;
+	if (gr_flags & SCOUTFS_NET_GREETING_FLAG_QUORUM)
+		mcv.flags |= SCOUTFS_MOUNTED_CLIENT_QUORUM;
 
-	return scoutfs_btree_insert(sb, &server->alloc, &server->wri,
-				    &super->mounted_clients, &key, &mcv,
-				    sizeof(mcv));
+	mutex_lock(&server->mounted_clients_mutex);
+	ret = scoutfs_btree_insert(sb, &server->alloc, &server->wri,
+				   &super->mounted_clients, &key, &mcv,
+				   sizeof(mcv));
+	mutex_unlock(&server->mounted_clients_mutex);
+
+	return ret;
 }
 
 /*
@@ -1047,9 +1052,6 @@ static int insert_mounted_client(struct super_block *sb, u64 rid,
 * removed if we're processing a farewell on behalf of a client that
 * already had a previous server process its farewell.
 *
- * When we remove the last mounted client that's voting we write a new
- * quorum block with the updated unmount_barrier.
- *
- * The caller has to serialize with farewell processing.
 */
 static int delete_mounted_client(struct super_block *sb, u64 rid)
@@ -1061,8 +1063,10 @@ static int delete_mounted_client(struct super_block *sb, u64 rid)
 
	init_mounted_client_key(&key, rid);
 
+	mutex_lock(&server->mounted_clients_mutex);
	ret = scoutfs_btree_delete(sb, &server->alloc, &server->wri,
				   &super->mounted_clients, &key);
+	mutex_unlock(&server->mounted_clients_mutex);
	if (ret == -ENOENT)
		ret = 0;
@@ -1149,7 +1153,6 @@ static int server_greeting(struct super_block *sb,
	struct scoutfs_net_greeting *gr = arg;
	struct scoutfs_net_greeting greet;
	DECLARE_SERVER_INFO(sb, server);
-	__le64 umb = 0;
	bool reconnecting;
	bool first_contact;
	bool farewell;
@@ -1182,19 +1185,11 @@ static int server_greeting(struct super_block *sb,
		if (ret < 0)
			goto send_err;
 
-		spin_lock(&server->lock);
-		umb = super->unmount_barrier;
-		spin_unlock(&server->lock);
-
-		mutex_lock(&server->farewell_mutex);
		ret = insert_mounted_client(sb, le64_to_cpu(gr->rid),
					    le64_to_cpu(gr->flags));
-		mutex_unlock(&server->farewell_mutex);
-
		ret = scoutfs_server_apply_commit(sb, ret);
+		queue_work(server->wq, &server->farewell_work);
-	} else {
-		umb = gr->unmount_barrier;
	}
 
 send_err:
@@ -1203,7 +1198,6 @@ send_err:
	greet.fsid = super->hdr.fsid;
	greet.version = super->version;
	greet.server_term = cpu_to_le64(server->term);
-	greet.unmount_barrier = umb;
	greet.rid = gr->rid;
	greet.flags = 0;
@@ -1259,19 +1253,17 @@ static bool invalid_mounted_client_item(struct scoutfs_btree_item_ref *iref)
 
 /*
  * This work processes farewell requests asynchronously.  Requests from
- * voting clients can be held until only the final quorum remains and
+ * quorum members can be held until only the final majority remains and
  * they've all sent farewell requests.
  *
- * When we remove the last mounted client record for the last voting
- * client then we increase the unmount_barrier and write it to the super
- * block.  If voting clients don't get their farewell response they'll
- * see the greater umount_barrier in the super and will know that their
- * farewell has been processed and that they can exit.
+ * A client can be disconnected before receiving our farewell response.
+ * Before reconnecting they check for their mounted client item, if it's
+ * been removed then they know that their farewell has been processed
+ * and that they can finish unmounting without reconnecting.
 *
- * Responses that are waiting for clients who aren't voting are
- * immediately sent.  Clients that don't have a mounted client record
- * have already had their farewell processed by another server and can
- * proceed.
+ * Responses for clients who aren't quorum members are immediately sent.
+ * Clients that don't have a mounted client record have already had
+ * their farewell processed by another server and can proceed.
 *
 * Farewell responses are unique in that sending them causes the server
 * to shutdown the connection to the client next time the socket
@@ -1293,56 +1285,26 @@ static void farewell_worker(struct work_struct *work)
	struct farewell_request *tmp;
	struct farewell_request *fw;
	SCOUTFS_BTREE_ITEM_REF(iref);
-	unsigned int nr_unmounting = 0;
-	unsigned int nr_mounted = 0;
+	unsigned int quo_reqs = 0;
+	unsigned int quo_mnts = 0;
+	unsigned int non_mnts = 0;
	struct scoutfs_key key;
	LIST_HEAD(reqs);
	LIST_HEAD(send);
-	bool deleted = false;
-	bool voting;
	bool more_reqs;
	int ret;
 
	/* grab all the requests that are waiting */
-	mutex_lock(&server->farewell_mutex);
+	spin_lock(&server->farewell_lock);
	list_splice_init(&server->farewell_requests, &reqs);
-	mutex_unlock(&server->farewell_mutex);
+	spin_unlock(&server->farewell_lock);
 
-	/* count how many reqs requests are from voting clients */
-	nr_unmounting = 0;
-	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
-		init_mounted_client_key(&key, fw->rid);
-		ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
-					   &iref);
-		if (ret == 0 && invalid_mounted_client_item(&iref)) {
-			scoutfs_btree_put_iref(&iref);
-			ret = -EIO;
-		}
-		if (ret < 0) {
-			if (ret == -ENOENT) {
-				list_move_tail(&fw->entry, &send);
-				continue;
-			}
-			goto out;
-		}
-
-		mcv = iref.val;
-		voting = (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER) != 0;
-		scoutfs_btree_put_iref(&iref);
-
-		if (!voting) {
-			list_move_tail(&fw->entry, &send);
-			continue;
-		}
-
-		nr_unmounting++;
-	}
-
-	/* see how many mounted clients could vote for quorum */
+	/* first count mounted clients who could send requests */
	init_mounted_client_key(&key, 0);
	for (;;) {
+		mutex_lock(&server->mounted_clients_mutex);
		ret = scoutfs_btree_next(sb, &super->mounted_clients, &key,
					 &iref);
+		mutex_unlock(&server->mounted_clients_mutex);
		if (ret == 0 && invalid_mounted_client_item(&iref)) {
			scoutfs_btree_put_iref(&iref);
			ret = -EIO;
@@ -1356,23 +1318,62 @@ static void farewell_worker(struct work_struct *work)
		key = *iref.key;
		mcv = iref.val;
 
-		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_VOTER)
-			nr_mounted++;
+		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM)
+			quo_mnts++;
+		else
+			non_mnts++;
 
		scoutfs_btree_put_iref(&iref);
		scoutfs_key_inc(&key);
	}
 
-	/* send as many responses as we can to maintain quorum */
-	while ((fw = list_first_entry_or_null(&reqs, struct farewell_request,
-					      entry)) &&
-	       (nr_mounted > super->quorum_count ||
-		nr_unmounting >= nr_mounted)) {
+	/* walk requests, checking their mounted client items */
+	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
		init_mounted_client_key(&key, fw->rid);
+		mutex_lock(&server->mounted_clients_mutex);
		ret = scoutfs_btree_lookup(sb, &super->mounted_clients, &key,
					   &iref);
+		mutex_unlock(&server->mounted_clients_mutex);
		if (ret == 0 && invalid_mounted_client_item(&iref)) {
			scoutfs_btree_put_iref(&iref);
			ret = -EIO;
		}
		if (ret < 0) {
+			/* missing items means we've already processed */
			if (ret == -ENOENT) {
				list_move(&fw->entry, &send);
				continue;
			}
			goto out;
		}
 
-		list_move_tail(&fw->entry, &send);
-		nr_mounted--;
-		nr_unmounting--;
-		deleted = true;
+		mcv = iref.val;
+
+		/* count quo reqs, can always send to non-quo clients */
+		if (mcv->flags & SCOUTFS_MOUNTED_CLIENT_QUORUM) {
+			quo_reqs++;
+		} else {
+			list_move(&fw->entry, &send);
+			non_mnts--;
+		}
+
+		scoutfs_btree_put_iref(&iref);
+	}
+
+	/*
+	 * Only requests from quorum members remain and we've counted
+	 * them and the remaining mounts.  Send responses as long as
+	 * enough quorum clients remain for a majority, or all the
+	 * requests are from the final majority of quorum clients and
+	 * they're the only mounted clients.
+	 */
+	list_for_each_entry_safe(fw, tmp, &reqs, entry) {
+		if ((quo_mnts > scoutfs_quorum_votes_needed(sb)) ||
+		    ((quo_reqs == quo_mnts) && (non_mnts == 0))) {
+			list_move_tail(&fw->entry, &send);
+			quo_mnts--;
+			quo_reqs--;
+		}
+	}
 
	/* process and send farewell responses */
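The hold-or-send policy above reduces to a two-clause predicate: answer a quorum member's farewell only while enough members remain mounted to keep electing a server, or when the final majority is leaving together and nobody else is left. A standalone restatement of that predicate (mine, not from the diff; votes_needed stands in for scoutfs_quorum_votes_needed()):

```c
/*
 * Sketch (not from the diff): the farewell_worker send decision.
 * quo_mnts counts mounted quorum members, quo_reqs counts farewell
 * requests from them, non_mnts counts remaining non-quorum mounts.
 */
#include <stdbool.h>
#include <stdio.h>

static bool can_send_farewell(unsigned quo_mnts, unsigned quo_reqs,
			      unsigned non_mnts, unsigned votes_needed)
{
	/* enough members stay mounted to keep a majority... */
	if (quo_mnts > votes_needed)
		return true;
	/* ...or the whole final majority is leaving together, alone */
	return quo_reqs == quo_mnts && non_mnts == 0;
}

int main(void)
{
	/* 3 quorum mounts, majority of 2: one member may leave */
	printf("%d\n", can_send_farewell(3, 1, 0, 2));	/* 1 */
	/* down to the final majority: a single leaver must wait */
	printf("%d\n", can_send_farewell(2, 1, 0, 2));	/* 0 */
	/* the final majority all leaving at once may proceed */
	printf("%d\n", can_send_farewell(2, 2, 0, 2));	/* 1 */
	return 0;
}
```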
@@ -1381,24 +1382,12 @@ static void farewell_worker(struct work_struct *work)
		if (ret)
			goto out;
 
+		/* delete mounted client last, client reconnect looks for it */
		ret = scoutfs_lock_server_farewell(sb, fw->rid) ?:
		      remove_trans_seq(sb, fw->rid) ?:
		      reclaim_log_trees(sb, fw->rid) ?:
-		      delete_mounted_client(sb, fw->rid) ?:
-		      cancel_srch_compact(sb, fw->rid);
+		      cancel_srch_compact(sb, fw->rid) ?:
+		      delete_mounted_client(sb, fw->rid);
 
		ret = scoutfs_server_apply_commit(sb, ret);
		if (ret)
			goto out;
	}
 
-	/* update the unmount barrier if we deleted all voting clients */
-	if (deleted && nr_mounted == 0) {
-		ret = scoutfs_server_hold_commit(sb);
-		if (ret)
-			goto out;
-
-		le64_add_cpu(&super->unmount_barrier, 1);
-
-		ret = scoutfs_server_apply_commit(sb, ret);
-		if (ret)
@@ -1420,11 +1409,11 @@ static void farewell_worker(struct work_struct *work)
 
	ret = 0;
 out:
-	mutex_lock(&server->farewell_mutex);
+	spin_lock(&server->farewell_lock);
	more_reqs = !list_empty(&server->farewell_requests);
	list_splice_init(&reqs, &server->farewell_requests);
+	list_splice_init(&send, &server->farewell_requests);
-	mutex_unlock(&server->farewell_mutex);
+	spin_unlock(&server->farewell_lock);
 
	if (ret < 0)
		stop_server(server);
@@ -1437,15 +1426,17 @@ static void free_farewell_requests(struct super_block *sb, u64 rid)
	struct server_info *server = SCOUTFS_SB(sb)->server_info;
	struct farewell_request *tmp;
	struct farewell_request *fw;
+	LIST_HEAD(rid_list);
 
-	mutex_lock(&server->farewell_mutex);
+	spin_lock(&server->farewell_lock);
	list_for_each_entry_safe(fw, tmp, &server->farewell_requests, entry) {
-		if (rid == 0 || fw->rid == rid) {
-			list_del_init(&fw->entry);
-			kfree(fw);
-		}
+		if (rid == 0 || fw->rid == rid)
+			list_move_tail(&fw->entry, &rid_list);
	}
-	mutex_unlock(&server->farewell_mutex);
+	spin_unlock(&server->farewell_lock);
+
+	list_for_each_entry_safe(fw, tmp, &rid_list, entry)
+		kfree(fw);
 }
 
 /*
/*
|
||||
@@ -1479,9 +1470,9 @@ static int server_farewell(struct super_block *sb,
|
||||
fw->rid = rid;
|
||||
fw->net_id = id;
|
||||
|
||||
mutex_lock(&server->farewell_mutex);
|
||||
spin_lock(&server->farewell_lock);
|
||||
list_add_tail(&fw->entry, &server->farewell_requests);
|
||||
mutex_unlock(&server->farewell_mutex);
|
||||
spin_unlock(&server->farewell_lock);
|
||||
|
||||
queue_farewell_work(server);
|
||||
|
||||
@@ -1548,18 +1539,17 @@ static void scoutfs_server_worker(struct work_struct *work)
	struct super_block *sb = server->sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super = &sbi->super;
+	struct mount_options *opts = &sbi->opts;
	struct scoutfs_net_connection *conn = NULL;
	DECLARE_WAIT_QUEUE_HEAD(waitq);
	struct sockaddr_in sin;
	LIST_HEAD(conn_list);
	u64 max_vers;
	int ret;
	int err;
 
	trace_scoutfs_server_work_enter(sb, 0, 0);
 
-	sin = server->listen_sin;
-
+	scoutfs_quorum_slot_sin(super, opts->quorum_slot_nr, &sin);
	scoutfs_info(sb, "server setting up at "SIN_FMT, SIN_ARG(&sin));
 
	conn = scoutfs_net_alloc_conn(sb, server_notify_up, server_notify_down,
@@ -1579,9 +1569,6 @@ static void scoutfs_server_worker(struct work_struct *work)
		goto out;
	}
 
-	if (ret)
-		goto out;
-
	/* start up the server subsystems before accepting */
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
@@ -1621,19 +1608,6 @@ static void scoutfs_server_worker(struct work_struct *work)
	if (ret)
		goto shutdown;
 
-	/*
-	 * Write our address in the super before it's possible for net
-	 * processing to start writing the super as part of
-	 * transactions.  In theory clients could be trying to connect
-	 * to our address without having seen it in the super (maybe
-	 * they saw it a long time ago).
-	 */
-	scoutfs_addr_from_sin(&super->server_addr, &sin);
-	super->quorum_server_term = cpu_to_le64(server->term);
-	ret = scoutfs_write_super(sb, super);
-	if (ret < 0)
-		goto shutdown;
-
	/* start accepting connections and processing work */
	server->conn = conn;
	scoutfs_net_listen(sb, conn);
@@ -1660,30 +1634,14 @@ shutdown:
	scoutfs_lock_server_destroy(sb);
 
 out:
-	scoutfs_quorum_clear_leader(sb);
	scoutfs_net_free_conn(sb, conn);
 
+	/* let quorum know that we've shutdown */
+	scoutfs_quorum_server_shutdown(sb);
+
	scoutfs_info(sb, "server stopped at "SIN_FMT, SIN_ARG(&sin));
	trace_scoutfs_server_work_exit(sb, 0, ret);
 
-	/*
-	 * Always try to clear our presence in the super so that we're
-	 * not fenced.  We do this last because other mounts will try to
-	 * reach quorum the moment they see zero here.  The later we do
-	 * this the longer we have to finish shutdown while clients
-	 * timeout.
-	 */
-	err = scoutfs_read_super(sb, super);
-	if (err == 0) {
-		super->quorum_fenced_term = cpu_to_le64(server->term);
-		memset(&super->server_addr, 0, sizeof(super->server_addr));
-		err = scoutfs_write_super(sb, super);
-	}
-	if (err < 0) {
-		scoutfs_err(sb, "failed to clear election term %llu at "SIN_FMT", this mount could be fenced",
-			    server->term, SIN_ARG(&sin));
-	}
-
	server->err = ret;
	complete(&server->start_comp);
 }
@@ -1693,14 +1651,12 @@ out:
 * the super block's fence_term has been set to the new server's term so
 * that it won't be fenced.
 */
-int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
-			 u64 term)
+int scoutfs_server_start(struct super_block *sb, u64 term)
 {
	DECLARE_SERVER_INFO(sb, server);
 
	server->err = 0;
	server->shutting_down = false;
-	server->listen_sin = *sin;
	server->term = term;
	init_completion(&server->start_comp);
@@ -1753,12 +1709,13 @@ int scoutfs_server_setup(struct super_block *sb)
	INIT_WORK(&server->commit_work, scoutfs_server_commit_func);
	init_rwsem(&server->seq_rwsem);
	INIT_LIST_HEAD(&server->clients);
-	mutex_init(&server->farewell_mutex);
+	spin_lock_init(&server->farewell_lock);
	INIT_LIST_HEAD(&server->farewell_requests);
	INIT_WORK(&server->farewell_work, farewell_worker);
	mutex_init(&server->alloc_mutex);
	mutex_init(&server->logs_mutex);
	mutex_init(&server->srch_mutex);
+	mutex_init(&server->mounted_clients_mutex);
	seqcount_init(&server->roots_seqcount);
 
	server->wq = alloc_workqueue("scoutfs_server",
@@ -69,8 +69,7 @@ int scoutfs_server_apply_commit(struct super_block *sb, int err);
 
 struct sockaddr_in;
 struct scoutfs_quorum_elected_info;
-int scoutfs_server_start(struct super_block *sb, struct sockaddr_in *sin,
-			 u64 term);
+int scoutfs_server_start(struct super_block *sb, u64 term);
 void scoutfs_server_abort(struct super_block *sb);
 void scoutfs_server_stop(struct super_block *sb);
kmod/src/super.c | 100
@@ -176,7 +176,8 @@ static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
	struct super_block *sb = root->d_sb;
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
 
-	seq_printf(seq, ",server_addr="SIN_FMT, SIN_ARG(&opts->server_addr));
+	if (opts->quorum_slot_nr >= 0)
+		seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
	seq_printf(seq, ",metadev_path=%s", opts->metadev_path);
 
	return 0;
@@ -192,20 +193,19 @@ static ssize_t metadev_path_show(struct kobject *kobj,
 }
 SCOUTFS_ATTR_RO(metadev_path);
 
-static ssize_t server_addr_show(struct kobject *kobj,
-				struct kobj_attribute *attr, char *buf)
+static ssize_t quorum_server_nr_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
 {
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
 
-	return snprintf(buf, PAGE_SIZE, SIN_FMT"\n",
-			SIN_ARG(&opts->server_addr));
+	return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
 }
-SCOUTFS_ATTR_RO(server_addr);
+SCOUTFS_ATTR_RO(quorum_server_nr);
 
 static struct attribute *mount_options_attrs[] = {
	SCOUTFS_ATTR_PTR(metadev_path),
-	SCOUTFS_ATTR_PTR(server_addr),
+	SCOUTFS_ATTR_PTR(quorum_server_nr),
	NULL,
 };
@@ -257,15 +257,12 @@ static void scoutfs_put_super(struct super_block *sb)
	scoutfs_item_destroy(sb);
	scoutfs_forest_destroy(sb);
 
-	/* the server locks the listen address and compacts */
-	scoutfs_quorum_destroy(sb);
	scoutfs_lock_shutdown(sb);
	scoutfs_server_destroy(sb);
	scoutfs_net_destroy(sb);
	scoutfs_lock_destroy(sb);
 
+	/* server clears quorum leader flag during shutdown */
+	scoutfs_quorum_destroy(sb);
+
	scoutfs_block_destroy(sb);
	scoutfs_destroy_triggers(sb);
	scoutfs_options_destroy(sb);
@@ -309,6 +306,34 @@ int scoutfs_write_super(struct super_block *sb,
			sizeof(struct scoutfs_super_block));
 }
 
+static bool invalid_blkno_limits(struct super_block *sb, char *which,
+				 u64 start, __le64 first, __le64 last,
+				 struct block_device *bdev, int shift)
+{
+	u64 blkno;
+
+	if (le64_to_cpu(first) < start) {
+		scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
+			    which, le64_to_cpu(first), start);
+		return true;
+	}
+
+	if (le64_to_cpu(first) > le64_to_cpu(last)) {
+		scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
+			    which, le64_to_cpu(first), which, le64_to_cpu(last));
+		return true;
+	}
+
+	blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
+	if (le64_to_cpu(last) > blkno) {
+		scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
+			    which, le64_to_cpu(last), blkno);
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * Read super, specifying bdev.
  */
@@ -316,9 +341,9 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
					struct block_device *bdev,
					struct scoutfs_super_block *super_res)
 {
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_super_block *super;
	__le32 calc;
-	u64 blkno;
	int ret;
 
	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -362,48 +387,17 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
 
	/* XXX do we want more rigorous invalid super checking? */
 
-	if (super->quorum_count == 0 ||
-	    super->quorum_count > SCOUTFS_QUORUM_MAX_COUNT) {
-		scoutfs_err(sb, "super block has invalid quorum count %u, must be > 0 and <= %u",
-			    super->quorum_count, SCOUTFS_QUORUM_MAX_COUNT);
+	if (invalid_blkno_limits(sb, "meta",
+				 SCOUTFS_META_DEV_START_BLKNO,
+				 super->first_meta_blkno,
+				 super->last_meta_blkno, sbi->meta_bdev,
+				 SCOUTFS_BLOCK_LG_SHIFT) ||
+	    invalid_blkno_limits(sb, "data",
+				 SCOUTFS_DATA_DEV_START_BLKNO,
+				 super->first_data_blkno,
+				 super->last_data_blkno, sb->s_bdev,
+				 SCOUTFS_BLOCK_SM_SHIFT)) {
		ret = -EINVAL;
		goto out;
	}
 
-	blkno = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
-		SCOUTFS_BLOCK_SM_LG_SHIFT;
-	if (le64_to_cpu(super->first_meta_blkno) < blkno) {
-		scoutfs_err(sb, "super block first meta blkno %llu is within quorum blocks",
-			    le64_to_cpu(super->first_meta_blkno));
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (le64_to_cpu(super->first_meta_blkno) >
-	    le64_to_cpu(super->last_meta_blkno)) {
-		scoutfs_err(sb, "super block first meta blkno %llu is greater than last meta blkno %llu",
-			    le64_to_cpu(super->first_meta_blkno),
-			    le64_to_cpu(super->last_meta_blkno));
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (le64_to_cpu(super->first_data_blkno) >
-	    le64_to_cpu(super->last_data_blkno)) {
-		scoutfs_err(sb, "super block first data blkno %llu is greater than last data blkno %llu",
-			    le64_to_cpu(super->first_data_blkno),
-			    le64_to_cpu(super->last_data_blkno));
-		ret = -EINVAL;
-		goto out;
-	}
-
-	blkno = (i_size_read(sb->s_bdev->bd_inode) >>
-		 SCOUTFS_BLOCK_SM_SHIFT) - 1;
-	if (le64_to_cpu(super->last_data_blkno) > blkno) {
-		scoutfs_err(sb, "super block last data blkno %llu is outsite device size last blkno %llu",
-			    le64_to_cpu(super->last_data_blkno), blkno);
-		ret = -EINVAL;
-		goto out;
-	}
-
 out:
@@ -599,8 +593,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	       scoutfs_setup_trans(sb) ?:
	       scoutfs_lock_setup(sb) ?:
	       scoutfs_net_setup(sb) ?:
-	       scoutfs_quorum_setup(sb) ?:
	       scoutfs_server_setup(sb) ?:
+	       scoutfs_quorum_setup(sb) ?:
	       scoutfs_client_setup(sb) ?:
	       scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
				&sbi->rid_lock) ?:
@@ -3,7 +3,7 @@
 t_filter_fs()
 {
	sed -e 's@mnt/test\.[0-9]*@mnt/test@g' \
-	    -e 's@Device: [a-fA-F0-7]*h/[0-9]*d@Device: 0h/0d@g'
+	    -e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
 }
 
 #
@@ -99,6 +99,19 @@ t_first_client_nr()
	t_fail "t_first_client_nr didn't find any clients"
 }
 
+#
+# The number of quorum members needed to form a majority to start the
+# server.
+#
+t_majority_count()
+{
+	if [ "$T_QUORUM" -lt 3 ]; then
+		echo 1
+	else
+		echo $(((T_QUORUM / 2) + 1))
+	fi
+}
+
 t_mount()
 {
	local nr="$1"
@@ -116,7 +129,7 @@ t_umount()
	test "$nr" -lt "$T_NR_MOUNTS" || \
		t_fail "fs nr $nr invalid"
 
-	eval t_quiet umount \$T_DB$i
+	eval t_quiet umount \$T_M$i
 }
 
 #
@@ -52,11 +52,11 @@ $(basename $0) options:
         | the file system to be tested. Will be clobbered by -m mkfs.
    -m   | Run mkfs on the device before mounting and running
         | tests. Implies unmounting existing mounts first.
    -n   | The number of devices and mounts to test.
    -n <nr> | The number of devices and mounts to test.
    -P   | Enable trace_printk.
    -p   | Exit script after preparing mounts only, don't run tests.
    -q <nr> | Specify the quorum count needed to mount. This is
         | used when running mkfs and is needed by a few tests.
    -q <nr> | The first <nr> mounts will be quorum members. Must be
         | at least 1 and no greater than -n number of mounts.
    -r <dir> | Specify the directory in which to store results of
         | test runs. The directory will be created if it doesn't
         | exist. Previous results will be deleted as each test runs.
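Under the new semantics, -q no longer sets an abstract quorum count; it says how many of the -n mounts claim quorum slots. A hypothetical invocation (the script name, results path, and -f/-e device paths are invented for illustration):

```shell
# five mounts, the first three of which are quorum members;
# -f/-e name the extra meta/data devices documented above
./run-tests.sh -m -n 5 -q 3 -r /tmp/scoutfs-results \
	-f /dev/extra_meta_dev -e /dev/extra_data_dev
```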
@@ -199,7 +199,6 @@ test -e "$T_EX_META_DEV" || die "extra meta device -f '$T_EX_META_DEV' doesn't e
test -n "$T_EX_DATA_DEV" || die "must specify -e extra data device"
test -e "$T_EX_DATA_DEV" || die "extra data device -e '$T_EX_DATA_DEV' doesn't exist"

test -n "$T_MKFS" -a -z "$T_QUORUM" && die "mkfs (-m) requires quorum (-q)"
test -n "$T_RESULTS" || die "must specify -r results dir"
test -n "$T_XFSTESTS_REPO" -a -z "$T_XFSTESTS_BRANCH" -a -z "$T_SKIP_CHECKOUT" && \
	die "-X xfstests repo requires -x xfstests branch"
@@ -209,6 +208,12 @@ test -n "$T_XFSTESTS_BRANCH" -a -z "$T_XFSTESTS_REPO" -a -z "$T_SKIP_CHECKOUT" &
test -n "$T_NR_MOUNTS" || die "must specify -n nr mounts"
test "$T_NR_MOUNTS" -ge 1 -a "$T_NR_MOUNTS" -le 8 || \
	die "-n nr mounts must be >= 1 and <= 8"
test -n "$T_QUORUM" || \
	die "must specify -q number of mounts that are quorum members"
test "$T_QUORUM" -ge "1" || \
	die "-q quorum members must be at least 1"
test "$T_QUORUM" -le "$T_NR_MOUNTS" || \
	die "-q quorum members must not be greater than -n mounts"

# top level paths
T_KMOD=$(realpath "$(dirname $0)/../kmod")

@@ -307,8 +312,14 @@ if [ -n "$T_UNMOUNT" ]; then
	unmount_all
fi

quo=""
if [ -n "$T_MKFS" ]; then
	cmd scoutfs mkfs -Q "$T_QUORUM" "$T_META_DEVICE" "$T_DATA_DEVICE" -f
	for i in $(seq 0 $((T_QUORUM - 1))); do
		quo="$quo -Q $i,127.0.0.1,$((42000 + i))"
	done

	msg "making new filesystem with $T_QUORUM quorum members"
	cmd scoutfs mkfs -f $quo "$T_META_DEVICE" "$T_DATA_DEVICE"
fi

if [ -n "$T_INSMOD" ]; then
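With T_QUORUM=3, the loop above builds one -Q slot per quorum member on consecutive loopback ports, so the generated command expands to roughly:

```shell
# the loop-built mkfs invocation for T_QUORUM=3
scoutfs mkfs -f \
	-Q 0,127.0.0.1,42000 \
	-Q 1,127.0.0.1,42001 \
	-Q 2,127.0.0.1,42002 \
	"$T_META_DEVICE" "$T_DATA_DEVICE"
```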
@@ -365,8 +376,12 @@ for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
	dir="/mnt/test.$i"
	test -d "$dir" || cmd mkdir -p "$dir"

	opts="-o metadev_path=$meta_dev"
	if [ "$i" -lt "$T_QUORUM" ]; then
		opts="$opts,quorum_slot_nr=$i"
	fi

	msg "mounting $meta_dev|$data_dev on $dir"
	opts="-o server_addr=127.0.0.1,metadev_path=$meta_dev"
	cmd mount -t scoutfs $opts "$data_dev" "$dir" &

	p="$!"
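The option assembly above hands a quorum_slot_nr only to the first T_QUORUM mounts; the rest mount as plain clients. For T_QUORUM=2 and three mounts, the issued commands would look roughly like this sketch (devices as in the loop):

```shell
mount -t scoutfs -o metadev_path=$meta_dev,quorum_slot_nr=0 "$data_dev" /mnt/test.0
mount -t scoutfs -o metadev_path=$meta_dev,quorum_slot_nr=1 "$data_dev" /mnt/test.1
mount -t scoutfs -o metadev_path=$meta_dev "$data_dev" /mnt/test.2
```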
@@ -25,8 +25,7 @@ lock-conflicting-batch-commit.sh
cross-mount-data-free.sh
persistent-item-vers.sh
setup-error-teardown.sh
# failing in jenkins pr runners, zab's working on it
#umount-unmount-race.sh
mount-unmount-race.sh
createmany-parallel-mounts.sh
archive-light-cycle.sh
stale-btree-read.sh

@@ -4,25 +4,23 @@
# At the start of the test all mounts are mounted. Each iteration
# randomly decides to change each mount or to leave it alone.
#
# They create dirty items before unmounting to encourage compaction
# while unmounting
# Each iteration creates dirty items across the mounts randomly, giving
# unmount some work to do.
#
# For this test to be meaningful it needs multiple mounts beyond the
# quorum set which can be racing to mount and unmount. A reasonable
# config would be 5 mounts with 3 quorum. But the test will run with
# whatever count it finds.
# quorum majority which can be racing to mount and unmount. A
# reasonable config would be 5 mounts with 3 quorum members. But the
# test will run with whatever count it finds.
#
# This assumes that all the mounts are configured as voting servers. We
# could update it to be more clever and know that it can always safely
# unmount mounts that aren't configured as servers.
# The test assumes that the first mounts are the quorum members.
#

# nothing to do if we can't unmount
test "$T_NR_MOUNTS" == "$T_QUORUM" && \
	t_skip "only quorum members mounted, can't unmount"
majority_nr=$(t_majority_count)
quorum_nr=$T_QUORUM

nr_mounted=$T_NR_MOUNTS
nr_quorum=$T_QUORUM
cur_quorum=$quorum_nr
test "$cur_quorum" == "$majority_nr" && \
	t_skip "all quorum members make up majority, need more mounts to unmount"

echo "== create per mount files"
for i in $(t_fs_nrs); do
@@ -55,25 +53,42 @@ while [ "$SECONDS" -lt "$END" ]; do
		fi

		if [ "${mounted[$i]}" == 1 ]; then
			if [ "$nr_mounted" -gt "$nr_quorum" ]; then
			#
			# can always unmount non-quorum mounts,
			# can only unmount quorum members beyond majority
			#
			if [ "$i" -ge "$quorum_nr" -o \
			     "$cur_quorum" -gt "$majority_nr" ]; then
				t_umount $i &
				pid=$!
				echo "umount $i pid $pid quo $cur_quorum" \
					>> $T_TMP.log
				pids="$pids $pid"
				mounted[$i]=0
				(( nr_mounted-- ))
				if [ "$i" -lt "$quorum_nr" ]; then
					(( cur_quorum-- ))
				fi
			fi
		else
			t_mount $i &
			pid=$!
			pids="$pids $pid"
			echo "mount $i pid $pid quo $cur_quorum" >> $T_TMP.log
			mounted[$i]=1
			(( nr_mounted++ ))
			if [ "$i" -lt "$quorum_nr" ]; then
				(( cur_quorum++ ))
			fi
		fi
	done

	echo "waiting (secs $SECONDS)" >> $T_TMP.log
	for p in $pids; do
		t_quiet wait $p
		wait $p
		rc=$?
		if [ "$rc" != 0 ]; then
			echo "waiting for pid $p returned $rc"
			t_fail "background mount/umount returned error"
		fi
	done
	echo "done waiting (secs $SECONDS)" >> $T_TMP.log
done
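The nested test above is the whole safety argument of this test: a non-quorum mount may always unmount, while a quorum member may only go away while the members still up exceed the majority. Restated as a hedged helper sketch (can_unmount and its use of the loop's variables are invented for illustration):

```shell
# sketch of the unmount-eligibility rule used by the loop above
can_unmount() {
	# non-quorum mounts are always safe to unmount
	[ "$i" -ge "$quorum_nr" ] && return 0
	# quorum members may only leave while members exceed the majority
	[ "$cur_quorum" -gt "$majority_nr" ] && return 0
	return 1
}
```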
@@ -37,17 +37,25 @@ t_quiet make
t_quiet sync
# pwd stays in xfstests dir to build config and run

#
# Each filesystem needs specific mkfs and mount options because we put
# quorum member addresses in mkfs options and the metadata device in
# mount options.
#
cat << EOF > local.config
export FSTYP=scoutfs
export MKFS_OPTIONS="-Q 1"
export MKFS_OPTIONS="-f"
export MKFS_TEST_OPTIONS="-Q 0,127.0.0.1,42000"
export MKFS_SCRATCH_OPTIONS="-Q 0,127.0.0.1,43000"
export MKFS_DEV_OPTIONS="-Q 0,127.0.0.1,44000"
export TEST_DEV=$T_DB0
export TEST_DIR=$T_M0
export SCRATCH_META_DEV=$T_EX_META_DEV
export SCRATCH_DEV=$T_EX_DATA_DEV
export SCRATCH_MNT="$T_TMPDIR/mnt.scratch"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o server_addr=127.0.0.1,metadev_path=$T_MB0"
export SCOUTFS_SCRATCH_MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_EX_META_DEV"
export MOUNT_OPTIONS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
export TEST_FS_MOUNT_OPTS="-o quorum_slot_nr=0,metadev_path=$T_MB0"
EOF

cat << EOF > local.exclude

@@ -21,21 +21,19 @@ contains the filesystem's metadata.
.sp
This option is required.
.TP
.B server_addr=<ipv4:port>
The server_addr option indicates that this mount will participate in
quorum election to try and run a server for all the mounts of its
filesystem. The option specifies the local TCP IPv4 address that the
mount's elected server will listen on for connections from all other
mounts of the filesystem.
.B quorum_slot_nr=<number>
The quorum_slot_nr option assigns a quorum member slot to the mount.
The mount will use the slot assignment to claim exclusive ownership of
the slot's configured address and an associated metadata device block.
Each slot number must be used by only one mount at any given time.
.sp
The IPv4 address must be specified as a dotted quad, name resolution is
not supported. A specific port may be provided after a separating
colon. If no port is specified then a random port will be chosen. The
address will be used for the lifetime of the mount and can not be
changed. The mount must be unmounted to specify a different address.
When a mount is assigned a quorum slot it becomes a quorum member and
will participate in the raft leader election process and could start
the server for the filesystem if it is elected leader.
.sp
If server_addr is not specified then the mount will read the filesystem
until it sees the address of an elected server to connect to.
The assigned number must match one of the slots defined with \-Q options
when the filesystem was created with mkfs. If the number assigned
doesn't match a number created during mkfs then the mount will fail.
.SH FURTHER READING
A
.B scoutfs
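To make the slot semantics concrete, hedged examples of a member and a non-member mount (device paths and the chosen slot are illustrative, and assume slot 1 was defined with -Q at mkfs time):

```shell
# quorum member: claims slot 1 and may be elected to run the server
mount -t scoutfs -o quorum_slot_nr=1,metadev_path=/dev/meta_dev \
	/dev/data_dev /mnt/scoutfs

# plain client: no slot, connects to whichever member runs the server
mount -t scoutfs -o metadev_path=/dev/meta_dev \
	/dev/data_dev /mnt/scoutfs
```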
@@ -32,7 +32,7 @@ A path within a ScoutFS filesystem.
.PD

.TP
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-count} NUM [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.BI "mkfs META-DEVICE DATA-DEVICE {-Q|--quorum-slot} NR,ADDR,PORT [-m|--max-meta-size SIZE] [-d|--max-data-size SIZE] [-f|--force]"
.sp
Initialize a new ScoutFS filesystem on the target devices. Since ScoutFS uses
separate block devices for its metadata and data storage, two are required.
@@ -57,18 +57,20 @@ a faster block device for the metadata device.
The path to the block device to be used for ScoutFS file data. If possible, use
a larger block device for the data device.
.TP
.B "-Q, --quorum-count NUM"
The number of mounts needed to reach quorum and elect one
to be the server. Mounts of the filesystem will hang until a quorum of
mounts are operational.
.sp
Mounts with the
.B server_addr
mount option participate in quorum. The safest quorum number is the
smallest majority of an odd number of participating mounts. For
example, two out of three total mounts. This ensures that there can only be one
set of mounts that can establish quorum.
.B "-Q, --quorum-slot NR,ADDR,PORT"
Each \-Q option configures a quorum slot. NR specifies the number of
the slot to configure and must be between 0 and 14. Each slot number
must only be used once, but slots can be used in any order and they
need not be consecutive. This allows natural relationships between
slot numbers and nodes which may have arbitrary numbering schemes.
ADDR and PORT are the numerical IPv4 address and port which will be
used as the UDP endpoint for leader elections and as the TCP listening
address for server connections. The number of configured slots
determines the size of the quorum of member mounts which must be
present to start the server for the filesystem to operate. A simple
majority is typically required, while one mount is sufficient if only
one or two slots are configured. Until the majority quorum is present,
all mounts will hang waiting for a server to connect to.
.TP
.B "-m, --max-meta-size SIZE"
Limit the space used by ScoutFS on the metadata device to the
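A hedged example of the slot syntax described above, exploiting the rule that slot numbers need not be consecutive (addresses, port, and devices are invented):

```shell
# three slots numbered to match hypothetical host names node2/node4/node6
scoutfs mkfs \
	-Q 2,10.0.0.2,12345 \
	-Q 4,10.0.0.4,12345 \
	-Q 6,10.0.0.6,12345 \
	/dev/meta_dev /dev/data_dev
```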
@@ -25,17 +25,13 @@ static void init_block(struct scoutfs_btree_block *bt, int level)
 */
void btree_init_root_single(struct scoutfs_btree_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 seq, __le64 fsid)
			    u64 seq, u64 blkno)
{
	root->ref.blkno = cpu_to_le64(blkno);
	root->ref.seq = cpu_to_le64(1);
	root->ref.seq = cpu_to_le64(seq);
	root->height = 1;

	memset(bt, 0, SCOUTFS_BLOCK_LG_SIZE);
	bt->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_BTREE);
	bt->hdr.fsid = fsid;
	bt->hdr.blkno = cpu_to_le64(blkno);
	bt->hdr.seq = cpu_to_le64(1);

	init_block(bt, 0);
}
@@ -3,7 +3,7 @@

void btree_init_root_single(struct scoutfs_btree_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 seq, __le64 fsid);
			    u64 seq, u64 blkno);

void btree_append_item(struct scoutfs_btree_block *bt,
		       struct scoutfs_key *key, void *val, int val_len);

182 utils/src/mkfs.c
@@ -32,12 +32,22 @@
#include "leaf_item_hash.h"
#include "blkid.h"

static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
/*
 * Update the block header fields and write out the block.
 */
static int write_block(int fd, u32 magic, __le64 fsid, u64 seq, u64 blkno,
		       int shift, struct scoutfs_block_header *hdr)
{
	size_t size = 1ULL << shift;
	ssize_t ret;

	ret = pwrite(fd, blk, size, blkno << shift);
	hdr->magic = cpu_to_le32(magic);
	hdr->fsid = fsid;
	hdr->blkno = cpu_to_le64(blkno);
	hdr->seq = cpu_to_le64(seq);
	hdr->crc = cpu_to_le32(crc_block(hdr, size));

	ret = pwrite(fd, hdr, size, blkno << shift);
	if (ret != size) {
		fprintf(stderr, "write to blkno %llu returned %zd: %s (%d)\n",
			blkno, ret, strerror(errno), errno);
@@ -47,35 +57,18 @@ static int write_raw_block(int fd, u64 blkno, int shift, void *blk)
	return 0;
}

/*
 * Update the block's header and write it out.
 */
static int write_block(int fd, u64 blkno, int shift,
		       struct scoutfs_super_block *super,
		       struct scoutfs_block_header *hdr)
{
	size_t size = 1ULL << shift;

	if (super)
		*hdr = super->hdr;
	hdr->blkno = cpu_to_le64(blkno);
	hdr->crc = cpu_to_le32(crc_block(hdr, size));

	return write_raw_block(fd, blkno, shift, hdr);
}

/*
 * Write the single btree block that contains the blkno and len indexed
 * items to store the given extent, and update the root to point to it.
 */
static int write_alloc_root(struct scoutfs_super_block *super, int fd,
static int write_alloc_root(int fd, __le64 fsid,
			    struct scoutfs_alloc_root *root,
			    struct scoutfs_btree_block *bt,
			    u64 blkno, u64 start, u64 len)
			    u64 seq, u64 blkno, u64 start, u64 len)
{
	struct scoutfs_key key;

	btree_init_root_single(&root->root, bt, blkno, 1, super->hdr.fsid);
	btree_init_root_single(&root->root, bt, seq, blkno);
	root->total_len = cpu_to_le64(len);

	memset(&key, 0, sizeof(key));
@@ -94,19 +87,18 @@ static int write_alloc_root(struct scoutfs_super_block *super, int fd,
	key.skfl_blkno = cpu_to_le64(start);
	btree_append_item(bt, &key, NULL, 0);

	bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
					    SCOUTFS_BLOCK_LG_SIZE));

	return write_raw_block(fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
	return write_block(fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, seq, blkno,
			   SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
}

struct mkfs_args {
	unsigned long long quorum_count;
	char *meta_device;
	char *data_device;
	unsigned long long max_meta_size;
	unsigned long long max_data_size;
	bool force;
	int nr_slots;
	struct scoutfs_quorum_slot slots[SCOUTFS_QUORUM_MAX_SLOTS];
};

|
||||
@@ -124,12 +116,14 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
struct scoutfs_inode inode;
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_btree_block *bt = NULL;
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct scoutfs_key key;
|
||||
struct timeval tv;
|
||||
int meta_fd = -1;
|
||||
int data_fd = -1;
|
||||
char uuid_str[37];
|
||||
void *zeros = NULL;
|
||||
char *indent;
|
||||
u64 blkno;
|
||||
u64 meta_size;
|
||||
u64 data_size;
|
||||
@@ -139,10 +133,12 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
u64 last_data;
|
||||
u64 meta_start;
|
||||
u64 meta_len;
|
||||
__le64 fsid;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
gettimeofday(&tv, NULL);
|
||||
pseudo_random_bytes(&fsid, sizeof(fsid));
|
||||
|
||||
meta_fd = open(args->meta_device, O_RDWR | O_EXCL);
|
||||
if (meta_fd < 0) {
|
||||
@@ -191,10 +187,7 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* metadata blocks start after the quorum blocks */
|
||||
next_meta = (SCOUTFS_QUORUM_BLKNO + SCOUTFS_QUORUM_BLOCKS) >>
|
||||
SCOUTFS_BLOCK_SM_LG_SHIFT;
|
||||
/* rest of meta dev is available for metadata blocks */
|
||||
next_meta = SCOUTFS_META_DEV_START_BLKNO;
|
||||
last_meta = (meta_size >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
|
||||
/* Data blocks go on the data dev */
|
||||
first_data = SCOUTFS_DATA_DEV_START_BLKNO;
|
||||
@@ -202,9 +195,6 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
|
||||
/* partially initialize the super so we can use it to init others */
|
||||
memset(super, 0, SCOUTFS_BLOCK_SM_SIZE);
|
||||
pseudo_random_bytes(&super->hdr.fsid, sizeof(super->hdr.fsid));
|
||||
super->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_SUPER);
|
||||
super->hdr.seq = cpu_to_le64(1);
|
||||
super->version = cpu_to_le64(SCOUTFS_INTEROP_VERSION);
|
||||
uuid_generate(super->uuid);
|
||||
super->next_ino = cpu_to_le64(SCOUTFS_ROOT_INO + 1);
|
||||
@@ -215,11 +205,14 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
super->total_data_blocks = cpu_to_le64(last_data - first_data + 1);
|
||||
super->first_data_blkno = cpu_to_le64(first_data);
|
||||
super->last_data_blkno = cpu_to_le64(last_data);
|
||||
super->quorum_count = args->quorum_count;
|
||||
|
||||
assert(sizeof(args->slots) ==
|
||||
member_sizeof(struct scoutfs_super_block, qconf.slots));
|
||||
memcpy(super->qconf.slots, args->slots, sizeof(args->slots));
|
||||
|
||||
/* fs root starts with root inode and its index items */
|
||||
blkno = next_meta++;
|
||||
btree_init_root_single(&super->fs_root, bt, blkno, 1, super->hdr.fsid);
|
||||
btree_init_root_single(&super->fs_root, bt, 1, blkno);
|
||||
|
||||
memset(&key, 0, sizeof(key));
|
||||
key.sk_zone = SCOUTFS_INODE_INDEX_ZONE;
|
||||
@@ -244,10 +237,8 @@ static int do_mkfs(struct mkfs_args *args)
|
||||
inode.mtime.nsec = inode.atime.nsec;
|
||||
btree_append_item(bt, &key, &inode, sizeof(inode));
|
||||
|
||||
bt->hdr.crc = cpu_to_le32(crc_block(&bt->hdr,
|
||||
SCOUTFS_BLOCK_LG_SIZE));
|
||||
|
||||
ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, bt);
|
||||
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, 1, blkno,
|
||||
SCOUTFS_BLOCK_LG_SHIFT, &bt->hdr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -256,11 +247,6 @@ static int do_mkfs(struct mkfs_args *args)
	lblk = (void *)bt;
	memset(lblk, 0, SCOUTFS_BLOCK_LG_SIZE);

	lblk->hdr.magic = cpu_to_le32(SCOUTFS_BLOCK_MAGIC_ALLOC_LIST);
	lblk->hdr.fsid = super->hdr.fsid;
	lblk->hdr.blkno = cpu_to_le64(blkno);
	lblk->hdr.seq = cpu_to_le64(1);

	meta_len = (64 * 1024 * 1024) >> SCOUTFS_BLOCK_LG_SHIFT;
	for (i = 0; i < meta_len; i++) {
		lblk->blknos[i] = cpu_to_le64(next_meta);
@@ -268,20 +254,20 @@ static int do_mkfs(struct mkfs_args *args)
	}
	lblk->nr = cpu_to_le32(i);

	super->server_meta_avail[0].ref.blkno = lblk->hdr.blkno;
	super->server_meta_avail[0].ref.seq = lblk->hdr.seq;
	super->server_meta_avail[0].ref.blkno = cpu_to_le64(blkno);
	super->server_meta_avail[0].ref.seq = cpu_to_le64(1);
	super->server_meta_avail[0].total_nr = le32_to_le64(lblk->nr);
	super->server_meta_avail[0].first_nr = lblk->nr;

	lblk->hdr.crc = cpu_to_le32(crc_block(&bt->hdr, SCOUTFS_BLOCK_LG_SIZE));
	ret = write_raw_block(meta_fd, blkno, SCOUTFS_BLOCK_LG_SHIFT, lblk);
	ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_ALLOC_LIST, fsid, 1,
			  blkno, SCOUTFS_BLOCK_LG_SHIFT, &lblk->hdr);
	if (ret)
		goto out;

	/* the data allocator has a single extent */
	blkno = next_meta++;
	ret = write_alloc_root(super, meta_fd, &super->data_alloc, bt,
			       blkno, first_data,
	ret = write_alloc_root(meta_fd, fsid, &super->data_alloc, bt,
			       1, blkno, first_data,
			       le64_to_cpu(super->total_data_blocks));
	if (ret < 0)
		goto out;
@@ -298,8 +284,8 @@ static int do_mkfs(struct mkfs_args *args)
	/* each meta alloc root contains a portion of free metadata extents */
	for (i = 0; i < array_size(super->meta_alloc); i++) {
		blkno = next_meta++;
		ret = write_alloc_root(super, meta_fd, &super->meta_alloc[i], bt,
				       blkno, meta_start,
		ret = write_alloc_root(meta_fd, fsid, &super->meta_alloc[i], bt,
				       1, blkno, meta_start,
				       min(meta_len,
					   last_meta - meta_start + 1));
		if (ret < 0)
@@ -309,9 +295,11 @@ static int do_mkfs(struct mkfs_args *args)
	}

	/* zero out quorum blocks */
	hdr = zeros;
	for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
		ret = write_raw_block(meta_fd, SCOUTFS_QUORUM_BLKNO + i,
				      SCOUTFS_BLOCK_SM_SHIFT, zeros);
		ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_QUORUM, fsid,
				  1, SCOUTFS_QUORUM_BLKNO + i,
				  SCOUTFS_BLOCK_SM_SHIFT, hdr);
		if (ret < 0) {
			fprintf(stderr, "error zeroing quorum block: %s (%d)\n",
				strerror(-errno), -errno);
@@ -320,9 +308,9 @@ static int do_mkfs(struct mkfs_args *args)
	}

	/* write the super block to data dev and meta dev */
	super->hdr.seq = cpu_to_le64(1);
	ret = write_block(data_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  NULL, &super->hdr);
	ret = write_block(data_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid, 1,
			  SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  &super->hdr);
	if (ret)
		goto out;
@@ -334,8 +322,9 @@ static int do_mkfs(struct mkfs_args *args)
	}

	super->flags |= cpu_to_le64(SCOUTFS_FLAG_IS_META_BDEV);
	ret = write_block(meta_fd, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  NULL, &super->hdr);
	ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_SUPER, fsid,
			  1, SCOUTFS_SUPER_BLKNO, SCOUTFS_BLOCK_SM_SHIFT,
			  &super->hdr);
	if (ret)
		goto out;

@@ -356,7 +345,7 @@ static int do_mkfs(struct mkfs_args *args)
	       "  uuid: %s\n"
	       "  64KB metadata blocks: "SIZE_FMT"\n"
	       "  4KB data blocks: "SIZE_FMT"\n"
	       "  quorum count: %u\n",
	       "  quorum slots: ",
	       args->meta_device,
	       args->data_device,
	       le64_to_cpu(super->hdr.fsid),
@@ -365,8 +354,22 @@ static int do_mkfs(struct mkfs_args *args)
	       SIZE_ARGS(le64_to_cpu(super->total_meta_blocks),
			 SCOUTFS_BLOCK_LG_SIZE),
	       SIZE_ARGS(le64_to_cpu(super->total_data_blocks),
			 SCOUTFS_BLOCK_SM_SIZE),
	       super->quorum_count);
			 SCOUTFS_BLOCK_SM_SIZE));

	indent = "";
	for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
		struct scoutfs_quorum_slot *sl = &super->qconf.slots[i];
		struct in_addr in;

		if (sl->addr.addr == 0)
			continue;

		in.s_addr = htonl(le32_to_cpu(sl->addr.addr));
		printf("%s%u: %s:%u", indent,
		       i, inet_ntoa(in), le16_to_cpu(sl->addr.port));
		indent = "\n                ";
	}
	printf("\n");

	ret = 0;
out:
@@ -383,16 +386,55 @@ out:
	return ret;
}

static bool valid_quorum_slots(struct scoutfs_quorum_slot *slots)
{
	struct in_addr in;
	bool valid = true;
	char *addr;
	int i;
	int j;

	for (i = 0; i < SCOUTFS_QUORUM_MAX_SLOTS; i++) {
		if (slots[i].addr.addr == 0)
			continue;

		for (j = i + 1; j < SCOUTFS_QUORUM_MAX_SLOTS; j++) {
			if (slots[j].addr.addr == 0)
				continue;

			if (slots[i].addr.addr == slots[j].addr.addr &&
			    slots[i].addr.port == slots[j].addr.port) {

				in.s_addr =
					htonl(le32_to_cpu(slots[i].addr.addr));
				addr = inet_ntoa(in);
				fprintf(stderr, "quorum slot nr %u and %u have the same address %s:%u\n",
					i, j, addr,
					le16_to_cpu(slots[i].addr.port));
				valid = false;
			}
		}
	}

	return valid;
}

static int parse_opt(int key, char *arg, struct argp_state *state)
{
	struct mkfs_args *args = state->input;
	struct scoutfs_quorum_slot slot;
	int ret;

	switch (key) {
	case 'Q':
		ret = parse_u64(arg, &args->quorum_count);
		if (ret)
		ret = parse_quorum_slot(&slot, arg);
		if (ret < 0)
			return ret;
		if (args->slots[ret].addr.addr != 0)
			argp_error(state, "Quorum slot %u already specified before slot '%s'\n",
				   ret, arg);
		args->slots[ret] = slot;
		args->nr_slots++;
		break;
	case 'f':
		args->force = true;
@@ -432,12 +474,14 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
		argp_error(state, "more than two arguments given");
		break;
	case ARGP_KEY_FINI:
		if (!args->quorum_count)
			argp_error(state, "must provide nonzero quorum count with --quorum-count|-Q option");
		if (!args->nr_slots)
			argp_error(state, "must specify at least one quorum slot with --quorum-slot|-Q");
		if (!args->meta_device)
			argp_error(state, "no metadata device argument given");
		if (!args->data_device)
			argp_error(state, "no data device argument given");
		if (!valid_quorum_slots(args->slots))
			argp_error(state, "invalid quorum slot configuration");
		break;
	default:
		break;
@@ -447,7 +491,7 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
}

static struct argp_option options[] = {
	{ "quorum-count", 'Q', "NUM", 0, "Number of voters required to use the filesystem [Required]"},
	{ "quorum-slot", 'Q', "NR,ADDR,PORT", 0, "Specify quorum slot addresses [Required]"},
	{ "force", 'f', NULL, 0, "Overwrite existing data on block devices"},
	{ "max-meta-size", 'm', "SIZE", 0, "Use a size less than the base metadata device size (bytes or KMGTP units)"},
	{ "max-data-size", 'd', "SIZE", 0, "Use a size less than the base data device size (bytes or KMGTP units)"},
@@ -463,7 +507,7 @@ static struct argp argp = {

static int mkfs_cmd(int argc, char *argv[])
{
	struct mkfs_args mkfs_args = {0};
	struct mkfs_args mkfs_args = {NULL,};
	int ret;

|
||||
|
||||
@@ -3,6 +3,9 @@
|
||||
#include <stdlib.h>
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
#include <arpa/inet.h>
|
||||
|
||||
#include "sparse.h"
|
||||
#include "util.h"
|
||||
@@ -152,3 +155,65 @@ int parse_timespec(char *str, struct timespec *ts)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Parse a quorum slot specification string "NR,ADDR,PORT" into its
 * component parts.  We use sscanf to both parse the leading NR and
 * trailing PORT integers, and to pull out the inner ADDR string which
 * is then parsed to make sure that it's a valid unicast ipv4 address.
 * We require that all components be specified, and sscanf will check
 * this by the number of matches it returns.
 */
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg)
{
#define ADDR_CHARS 45 /* max ipv6 */
	char addr[ADDR_CHARS + 1] = {'\0',};
	struct in_addr in;
	int port;
	int parsed;
	int nr;
	int ret;

	/* leading and trailing ints, an inner sized string without ',', all separated by ',' */
	ret = sscanf(arg, "%u,%"__stringify(ADDR_CHARS)"[^,],%u%n",
		     &nr, addr, &port, &parsed);
	if (ret == EOF) {
		printf("error parsing quorum slot '%s': %s\n",
		       arg, strerror(errno));
		return -EINVAL;
	}

	if (parsed != strlen(arg)) {
		printf("extra unparsed trailing characters in quorum slot '%s'\n",
		       arg);
		return -EINVAL;
	}

	if (ret != 3) {
		printf("failed to parse all three NR,ADDR,PORT tokens in quorum slot '%s'\n", arg);
		return -EINVAL;
	}

	if (nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
		printf("invalid nr '%d' in quorum slot '%s', must be between 0 and %u\n",
		       nr, arg, SCOUTFS_QUORUM_MAX_SLOTS - 1);
		return -EINVAL;
	}

	if (port <= 0 || port > USHRT_MAX) {
		printf("invalid ipv4 port '%u' in quorum slot '%s', must be between 1 and %u\n",
		       port, arg, USHRT_MAX);
		return -EINVAL;
	}

	if (inet_aton(addr, &in) == 0 || htonl(in.s_addr) == 0 ||
	    htonl(in.s_addr) == UINT_MAX) {
		printf("invalid ipv4 address '%s' in quorum slot '%s'\n",
		       addr, arg);
		return -EINVAL;
	}

	slot->addr.addr = cpu_to_le32(htonl(in.s_addr));
	slot->addr.port = cpu_to_le16(port);
	return nr;
}

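Given the checks above, the accepted and rejected shapes of a slot string look like this hedged sketch, shown via the mkfs -Q option that feeds parse_quorum_slot (addresses and port invented, trailing device arguments elided):

```shell
scoutfs mkfs -Q 3,10.0.0.3,12345 ...    # ok: slot 3, unicast address, valid port
scoutfs mkfs -Q 15,10.0.0.3,12345 ...   # rejected: nr must be 0 through 14
scoutfs mkfs -Q 3,10.0.0.3,0 ...        # rejected: port must be 1 through 65535
scoutfs mkfs -Q 3,0.0.0.0,12345 ...     # rejected: all-zeros address
scoutfs mkfs -Q 3,10.0.0.3,12345,x ...  # rejected: trailing characters
scoutfs mkfs -Q 0,10.0.0.1,12345 \
             -Q 1,10.0.0.1,12345 ...    # rejected later by valid_quorum_slots:
                                        # "quorum slot nr 0 and 1 have the same
                                        #  address 10.0.0.1:12345"
```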
@@ -4,11 +4,14 @@
#include <sys/time.h>
#include <argp.h>

struct scoutfs_quorum_slot;

int parse_human(char* str, u64 *val_ret);
int parse_u64(char *str, u64 *val_ret);
int parse_s64(char *str, s64 *val_ret);
int parse_u32(char *str, u32 *val_ret);
int parse_timespec(char *str, struct timespec *ts);
int parse_quorum_slot(struct scoutfs_quorum_slot *slot, char *arg);

static inline char* strdup_or_error(const struct argp_state *state, char *str)
{
|
||||
return str;
|
||||
}
|
||||
|
||||
#define OFF_NAME(x) \
|
||||
{ offsetof(struct scoutfs_quorum_block, x), __stringify_1(x) }
|
||||
|
||||
static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
|
||||
{
|
||||
struct print_events {
|
||||
size_t offset;
|
||||
char *name;
|
||||
} events[] = {
|
||||
OFF_NAME(write), OFF_NAME(update_term), OFF_NAME(set_leader),
|
||||
OFF_NAME(clear_leader), OFF_NAME(fenced),
|
||||
};
|
||||
struct scoutfs_quorum_block *blk = NULL;
|
||||
struct scoutfs_quorum_block_event *ev;
|
||||
char *log_addr = NULL;
|
||||
u64 blkno;
|
||||
int ret;
|
||||
int i;
|
||||
int j;
|
||||
int e;
|
||||
|
||||
for (i = 0; i < SCOUTFS_QUORUM_BLOCKS; i++) {
|
||||
blkno = SCOUTFS_QUORUM_BLKNO + i;
|
||||
@@ -812,31 +823,21 @@ static int print_quorum_blocks(int fd, struct scoutfs_super_block *super)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (blk->voter_rid != 0) {
|
||||
printf("quorum block blkno %llu\n"
|
||||
" fsid %llx blkno %llu crc 0x%08x\n"
|
||||
" term %llu write_nr %llu voter_rid %016llx "
|
||||
"vote_for_rid %016llx\n"
|
||||
" log_nr %u\n",
|
||||
blkno, le64_to_cpu(blk->fsid),
|
||||
le64_to_cpu(blk->blkno), le32_to_cpu(blk->crc),
|
||||
le64_to_cpu(blk->term),
|
||||
le64_to_cpu(blk->write_nr),
|
||||
le64_to_cpu(blk->voter_rid),
|
||||
le64_to_cpu(blk->vote_for_rid),
|
||||
blk->log_nr);
|
||||
for (j = 0; j < blk->log_nr; j++) {
|
||||
free(log_addr);
|
||||
log_addr = alloc_addr_str(&blk->log[j].addr);
|
||||
if (!log_addr) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
printf(" [%u]: term %llu rid %llu addr %s\n",
|
||||
j, le64_to_cpu(blk->log[j].term),
|
||||
le64_to_cpu(blk->log[j].rid),
|
||||
log_addr);
|
||||
}
|
||||
printf("quorum blkno %llu (slot %llu)\n",
|
||||
blkno, blkno - SCOUTFS_QUORUM_BLKNO);
|
||||
print_block_header(&blk->hdr, SCOUTFS_BLOCK_SM_SIZE);
|
||||
printf(" term %llu random_write_mark 0x%llx flags 0x%llx\n",
|
||||
le64_to_cpu(blk->term),
|
||||
le64_to_cpu(blk->random_write_mark),
|
||||
le64_to_cpu(blk->flags));
|
||||
|
||||
for (e = 0; e < array_size(events); e++) {
|
||||
ev = (void *)blk + events[e].offset;
|
||||
|
||||
printf(" %12s: rid %016llx ts %llu.%08u\n",
|
||||
events[e].name, le64_to_cpu(ev->rid),
|
||||
le64_to_cpu(ev->ts.sec),
|
||||
le32_to_cpu(ev->ts.nsec));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -850,7 +851,8 @@ out:
|
||||
static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
|
||||
{
|
||||
char uuid_str[37];
|
||||
char *server_addr;
|
||||
char *addr;
|
||||
int i;
|
||||
|
||||
uuid_unparse(super->uuid, uuid_str);
|
||||
|
||||
@@ -864,16 +866,10 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
|
||||
le64_to_cpu(super->version), uuid_str);
|
||||
printf(" flags: 0x%016llx\n", le64_to_cpu(super->flags));
|
||||
|
||||
server_addr = alloc_addr_str(&super->server_addr);
|
||||
if (!server_addr)
|
||||
return;
|
||||
|
||||
/* XXX these are all in a crazy order */
|
||||
printf(" next_ino %llu next_trans_seq %llu\n"
|
||||
" total_meta_blocks %llu first_meta_blkno %llu last_meta_blkno %llu\n"
|
||||
" total_data_blocks %llu first_data_blkno %llu last_data_blkno %llu\n"
|
||||
" quorum_fenced_term %llu quorum_server_term %llu unmount_barrier %llu\n"
|
||||
" quorum_count %u server_addr %s\n"
|
||||
" meta_alloc[0]: "ALCROOT_F"\n"
|
||||
" meta_alloc[1]: "ALCROOT_F"\n"
|
||||
" data_alloc: "ALCROOT_F"\n"
|
||||
@@ -894,11 +890,6 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
|
||||
le64_to_cpu(super->total_data_blocks),
|
||||
le64_to_cpu(super->first_data_blkno),
|
||||
le64_to_cpu(super->last_data_blkno),
|
||||
le64_to_cpu(super->quorum_fenced_term),
|
||||
le64_to_cpu(super->quorum_server_term),
|
||||
le64_to_cpu(super->unmount_barrier),
|
||||
super->quorum_count,
|
||||
server_addr,
|
||||
ALCROOT_A(&super->meta_alloc[0]),
|
||||
ALCROOT_A(&super->meta_alloc[1]),
|
||||
ALCROOT_A(&super->data_alloc),
|
||||
@@ -922,7 +913,19 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
|
||||
le64_to_cpu(super->fs_root.ref.blkno),
|
||||
le64_to_cpu(super->fs_root.ref.seq));
|
||||
|
||||
free(server_addr);
|
||||
printf(" quorum config version %llu\n",
|
||||
le64_to_cpu(super->qconf.version));
|
||||
for (i = 0; i < array_size(super->qconf.slots); i++) {
|
||||
if (!super->qconf.slots[i].addr.addr &&
|
||||
!super->qconf.slots[i].addr.port)
|
||||
continue;
|
||||
|
||||
addr = alloc_addr_str(&super->qconf.slots[i].addr);
|
||||
if (addr) {
|
||||
printf(" quorum slot %2u: %s\n", i, addr);
|
||||
free(addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct print_args {
|
||||
|
||||
@@ -114,4 +114,7 @@ static inline int memcmp_lens(const void *a, int a_len,
|
||||
int get_path(char *path, int flags);
|
||||
int read_block(int fd, u64 blkno, int shift, void **ret_val);
|
||||
|
||||
#define __stringify_1(x) #x
|
||||
#define __stringify(x) __stringify_1(x)
|
||||
|
||||
#endif
|
||||
|
||||