scoutfs: add srch infrastructure

This introduces the srch mechanism that we'll use to accelerate finding
files based on the presence of a given named xattr.  This is an
optimized version of the initial prototype that was using locked btree
items for .indx. xattrs.

This is built around specific compressed data structures, having the
operation cost match the reality of orders of magnitude more writers
than readers, and adopting a relaxed locking model.  Combine all of this
and maintaining the xattrs no longer tanks creation rates while
maintaining excellent search latencies, given that searches are defined
as rare and relatively expensive.

The core data type is the srch entry which maps a hashed name to an
inode number.  Mounts can append entries to the end of unsorted log
files during their transaction.  The server tracks these files and
rotates them into a list of files as they get large enough.  Mounts have
compaction work that regularly asks the server for a set of files to
read and combine into a single sorted output file.  The server only
initiates compactions when it sees a number of files of roughly the same
size.  Searches then walk all the committed srch files, both log files
and sorted compacted files, looking for entries that associate an xattr
name with an inode number.

Signed-off-by: Zach Brown <zab@versity.com>
This commit is contained in:
Zach Brown
2020-06-23 09:48:08 -07:00
committed by Zach Brown
parent cca83b1758
commit f8e1812288
12 changed files with 2401 additions and 5 deletions

View File

@@ -32,6 +32,7 @@ scoutfs-y += \
scoutfs_trace.o \
server.o \
spbm.o \
srch.o \
super.o \
sysfs.o \
trans.o \

View File

@@ -210,6 +210,28 @@ int scoutfs_client_lock_recover_response(struct super_block *sb, u64 net_id,
net_id, 0, nlr, bytes);
}
/* Find srch files that need to be compacted. */
int scoutfs_client_srch_get_compact(struct super_block *sb,
struct scoutfs_srch_compact_input *scin)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
return scoutfs_net_sync_request(sb, client->conn,
SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
NULL, 0, scin, sizeof(*scin));
}
/* Commit the result of a srch file compaction. */
int scoutfs_client_srch_commit_compact(struct super_block *sb,
struct scoutfs_srch_compact_result *scres)
{
struct client_info *client = SCOUTFS_SB(sb)->client_info;
return scoutfs_net_sync_request(sb, client->conn,
SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
scres, sizeof(*scres), NULL, 0);
}
/* The client is receiving a invalidation request from the server */
static int client_lock(struct super_block *sb,
struct scoutfs_net_connection *conn, u8 cmd, u64 id,

View File

@@ -20,6 +20,10 @@ int scoutfs_client_lock_response(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock *nl);
int scoutfs_client_lock_recover_response(struct super_block *sb, u64 net_id,
struct scoutfs_net_lock_recover *nlr);
/* synchronous rpcs to get srch compaction work and commit its result */
int scoutfs_client_srch_get_compact(struct super_block *sb,
struct scoutfs_srch_compact_input *scin);
int scoutfs_client_srch_commit_compact(struct super_block *sb,
struct scoutfs_srch_compact_result *scres);
int scoutfs_client_setup(struct super_block *sb);
void scoutfs_client_destroy(struct super_block *sb);

View File

@@ -109,6 +109,24 @@
EXPAND_COUNTER(server_commit_prepare) \
EXPAND_COUNTER(server_commit_queue) \
EXPAND_COUNTER(server_commit_worker) \
EXPAND_COUNTER(srch_add_entry) \
EXPAND_COUNTER(srch_compact_dirty_block) \
EXPAND_COUNTER(srch_compact_entry) \
EXPAND_COUNTER(srch_compact_flush) \
EXPAND_COUNTER(srch_compact_free_block) \
EXPAND_COUNTER(srch_compact_log_page) \
EXPAND_COUNTER(srch_compact_removed_entry) \
EXPAND_COUNTER(srch_inconsistent_ref) \
EXPAND_COUNTER(srch_rotate_log) \
EXPAND_COUNTER(srch_search_log) \
EXPAND_COUNTER(srch_search_log_block) \
EXPAND_COUNTER(srch_search_retry_empty) \
EXPAND_COUNTER(srch_search_sorted) \
EXPAND_COUNTER(srch_search_sorted_block) \
EXPAND_COUNTER(srch_search_stale_eio) \
EXPAND_COUNTER(srch_search_stale_retry) \
EXPAND_COUNTER(srch_search_xattrs) \
EXPAND_COUNTER(srch_read_stale) \
EXPAND_COUNTER(trans_commit_data_alloc_low) \
EXPAND_COUNTER(trans_commit_fsync) \
EXPAND_COUNTER(trans_commit_full) \

View File

@@ -24,6 +24,7 @@
#include "block.h"
#include "forest.h"
#include "hash.h"
#include "srch.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -65,6 +66,10 @@ struct forest_info {
struct scoutfs_radix_allocator *alloc;
struct scoutfs_block_writer *wri;
struct scoutfs_log_trees our_log;
struct mutex srch_mutex;
struct scoutfs_srch_file srch_file;
struct scoutfs_block *srch_bl;
};
#define DECLARE_FOREST_INFO(sb, name) \
@@ -1457,6 +1462,27 @@ void scoutfs_forest_free_batch(struct super_block *sb, struct list_head *list)
{
}
/*
* Add a srch entry to the current transaction's log file. It will be
* committed in a transaction along with the dirty btree blocks that
* hold dirty items. The srch entries aren't governed by lock
* consistency.
*
* We lock here because of the shared file and block reference.
* Typically these calls are a quick appending to the end of the block,
* but they will allocate or cow blocks every few thousand calls.
*/
int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id)
{
DECLARE_FOREST_INFO(sb, finf);
int ret;
/* serialize appenders on the shared srch file and cached block ref */
mutex_lock(&finf->srch_mutex);
ret = scoutfs_srch_add(sb, finf->alloc, finf->wri, &finf->srch_file,
&finf->srch_bl, hash, ino, id);
mutex_unlock(&finf->srch_mutex);
return ret;
}
/*
* This is called from transactions as a new transaction opens and is
@@ -1480,6 +1506,9 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
finf->our_log.bloom_ref = lt->bloom_ref;
finf->our_log.rid = lt->rid;
finf->our_log.nr = lt->nr;
finf->srch_file = lt->srch_file;
WARN_ON_ONCE(finf->srch_bl); /* commiting should have put the block */
finf->srch_bl = NULL;
up_write(&finf->rwsem);
}
@@ -1497,6 +1526,10 @@ void scoutfs_forest_get_btrees(struct super_block *sb,
lt->item_root = finf->our_log.item_root;
lt->bloom_ref = finf->our_log.bloom_ref;
lt->srch_file = finf->srch_file;
scoutfs_block_put(sb, finf->srch_bl);
finf->srch_bl = NULL;
trace_scoutfs_forest_prepare_commit(sb, &lt->item_root.ref,
&lt->bloom_ref);
@@ -1516,6 +1549,7 @@ int scoutfs_forest_setup(struct super_block *sb)
/* the finf fields will be setup as we open a transaction */
init_rwsem(&finf->rwsem);
mutex_init(&finf->srch_mutex);
sbi->forest_info = finf;
ret = 0;
@@ -1532,6 +1566,7 @@ void scoutfs_forest_destroy(struct super_block *sb)
struct forest_info *finf = SCOUTFS_SB(sb)->forest_info;
if (finf) {
scoutfs_block_put(sb, finf->srch_bl);
kfree(finf);
sbi->forest_info = NULL;
}

View File

@@ -38,6 +38,7 @@ int scoutfs_forest_delete_save(struct super_block *sb,
int scoutfs_forest_restore(struct super_block *sb, struct list_head *list,
struct scoutfs_lock *lock);
void scoutfs_forest_free_batch(struct super_block *sb, struct list_head *list);
int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id);
void scoutfs_forest_init_btrees(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,

View File

@@ -9,6 +9,8 @@
#define SCOUTFS_BLOCK_MAGIC_BTREE 0xe597f96d
#define SCOUTFS_BLOCK_MAGIC_BLOOM 0x31995604
#define SCOUTFS_BLOCK_MAGIC_RADIX 0xebeb5e65
#define SCOUTFS_BLOCK_MAGIC_SRCH_BLOCK 0x897e4a7d
#define SCOUTFS_BLOCK_MAGIC_SRCH_PARENT 0xb23a2a05
/*
* The super block, quorum block, and file data allocation granularity
@@ -275,6 +277,93 @@ struct scoutfs_mounted_client_btree_val {
#define SCOUTFS_MOUNTED_CLIENT_VOTER (1 << 0)
/*
* srch files are a contiguous run of blocks with compressed entries
* described by a dense parent radix. The files can be stored in
* log_tree items when the files contain unsorted entries written by
* mounts during their transactions. Sorted files of increasing size
* are kept in a btree off the super for searching and further
* compacting.
*/
/*
 * A single srch entry: the hash of an xattr name, the inode that the
 * xattr was set on, and an id -- the id presumably orders repeated
 * creation/deletion of the same xattr; confirm against srch.c.
 */
struct scoutfs_srch_entry {
__le64 hash;
__le64 ino;
__le64 id;
} __packed;
/* worst-case encoded size of one entry: 2 leading bytes plus three full u64s */
#define SCOUTFS_SRCH_ENTRY_MAX_BYTES (2 + (sizeof(__u64) * 3))
/* reference to a srch block; seq presumably detects stale reads -- verify */
struct scoutfs_srch_ref {
__le64 blkno;
__le64 seq;
} __packed;
/*
 * A srch file: the range of entries it contains, block and entry
 * counts, the root block reference, and the height of the parent
 * radix over the entry blocks.
 */
struct scoutfs_srch_file {
struct scoutfs_srch_entry first;
struct scoutfs_srch_entry last;
__le64 blocks;
__le64 entries;
struct scoutfs_srch_ref ref;
__u8 height;
} __packed;
/* a parent block is a dense array of refs to the level below */
struct scoutfs_srch_parent {
struct scoutfs_block_header hdr;
struct scoutfs_srch_ref refs[0];
} __packed;
/* number of refs that fit in one large parent block */
#define SCOUTFS_SRCH_PARENT_REFS \
((SCOUTFS_BLOCK_LG_SIZE - \
offsetof(struct scoutfs_srch_parent, refs)) / \
sizeof(struct scoutfs_srch_ref))
/*
 * A leaf block of compressed entries.  first/last bound the block's
 * entries; tail is presumably the last encoded entry so appends can
 * delta against it -- confirm against the encoder in srch.c.
 */
struct scoutfs_srch_block {
struct scoutfs_block_header hdr;
struct scoutfs_srch_entry first;
struct scoutfs_srch_entry last;
struct scoutfs_srch_entry tail;
__le32 entry_nr;
__le32 entry_bytes;
__u8 entries[0];
} __packed;
/*
* Decoding loads final small deltas with full __u64 loads. Rather than
* check the size before each load we stop coding entries past the point
* where a full size entry could overflow the block. A final entry can
* start at this byte count and consume the rest of the block, though
* its unlikely.
*/
#define SCOUTFS_SRCH_BLOCK_SAFE_BYTES \
(SCOUTFS_BLOCK_LG_SIZE - sizeof(struct scoutfs_srch_block) - \
SCOUTFS_SRCH_ENTRY_MAX_BYTES)
/* rotate a mount's srch log file once it reaches ~1MB of blocks */
#define SCOUTFS_SRCH_LOG_BLOCK_LIMIT (1024 * 1024 / SCOUTFS_BLOCK_LG_SIZE)
/* compact 1 << 3 = 8 similarly-sized files at a time */
#define SCOUTFS_SRCH_COMPACT_ORDER 3
#define SCOUTFS_SRCH_COMPACT_NR (1 << SCOUTFS_SRCH_COMPACT_ORDER)
/*
 * Sent from the server to describe a compaction: allocator roots the
 * client should use, an id for the operation, the number of valid
 * input files in sfl[], and the flags below.
 */
struct scoutfs_srch_compact_input {
struct scoutfs_radix_root meta_avail;
struct scoutfs_radix_root meta_freed;
__le64 id;
__u8 nr;
__u8 flags;
struct scoutfs_srch_file sfl[SCOUTFS_SRCH_COMPACT_NR];
} __packed;
/*
 * Sent back to the server with the single sorted output file and the
 * remaining allocator state after the compaction ran.
 */
struct scoutfs_srch_compact_result {
struct scoutfs_radix_root meta_avail;
struct scoutfs_radix_root meta_freed;
__le64 id;
__u8 flags;
struct scoutfs_srch_file sfl;
} __packed;
/* input files are unsorted logs */
#define SCOUTFS_SRCH_COMPACT_FLAG_LOG (1 << 0)
/* compaction failed, release inputs */
#define SCOUTFS_SRCH_COMPACT_FLAG_ERROR (1 << 1)
/*
* XXX I imagine we should rename these now that they've evolved to track
* all the btrees that clients use during a transaction. It's not just
@@ -287,6 +376,7 @@ struct scoutfs_log_trees {
struct scoutfs_btree_ref bloom_ref;
struct scoutfs_radix_root data_avail;
struct scoutfs_radix_root data_freed;
struct scoutfs_srch_file srch_file;
__le64 rid;
__le64 nr;
} __packed;
@@ -298,6 +388,7 @@ struct scoutfs_log_trees_val {
struct scoutfs_btree_ref bloom_ref;
struct scoutfs_radix_root data_avail;
struct scoutfs_radix_root data_freed;
struct scoutfs_srch_file srch_file;
} __packed;
struct scoutfs_log_item_value {
@@ -348,6 +439,7 @@ struct scoutfs_bloom_block {
#define SCOUTFS_LOCK_CLIENTS_ZONE 7
#define SCOUTFS_TRANS_SEQ_ZONE 8
#define SCOUTFS_MOUNTED_CLIENT_ZONE 9
#define SCOUTFS_SRCH_ZONE 10
/* inode index zone */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
@@ -372,6 +464,11 @@ struct scoutfs_bloom_block {
/* lock zone, only ever found in lock ranges, never in persistent items */
#define SCOUTFS_RENAME_TYPE 1
/* srch zone, only in server btrees */
#define SCOUTFS_SRCH_LOG_TYPE 1
#define SCOUTFS_SRCH_BLOCKS_TYPE 2
#define SCOUTFS_SRCH_BUSY_TYPE 3
/*
* The extents that map blocks in a fixed-size logical region of a file
* are packed and stored in item values. The packed extents are
@@ -496,6 +593,7 @@ struct scoutfs_super_block {
struct scoutfs_btree_root lock_clients;
struct scoutfs_btree_root trans_seqs;
struct scoutfs_btree_root mounted_clients;
struct scoutfs_btree_root srch_root;
} __packed;
#define SCOUTFS_ROOT_INO 1
@@ -688,6 +786,8 @@ enum {
SCOUTFS_NET_CMD_STATFS,
SCOUTFS_NET_CMD_LOCK,
SCOUTFS_NET_CMD_LOCK_RECOVER,
SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
SCOUTFS_NET_CMD_FAREWELL,
SCOUTFS_NET_CMD_UNKNOWN,
};
@@ -734,6 +834,7 @@ struct scoutfs_net_statfs {
struct scoutfs_net_roots {
struct scoutfs_btree_root fs_root;
struct scoutfs_btree_root logs_root;
struct scoutfs_btree_root srch_root;
} __packed;
struct scoutfs_net_lock {

View File

@@ -36,6 +36,7 @@
#include "endian_swap.h"
#include "quorum.h"
#include "trans.h"
#include "srch.h"
/*
* Every active mount can act as the server that listens on a net
@@ -84,6 +85,7 @@ struct server_info {
struct scoutfs_block_writer wri;
struct mutex logs_mutex;
struct mutex srch_mutex;
/* stable versions stored from commits, given in locks and rpcs */
seqcount_t roots_seqcount;
@@ -238,12 +240,14 @@ void scoutfs_server_get_roots(struct super_block *sb,
static void set_roots(struct server_info *server,
struct scoutfs_btree_root *fs_root,
struct scoutfs_btree_root *logs_root)
struct scoutfs_btree_root *logs_root,
struct scoutfs_btree_root *srch_root)
{
preempt_disable();
write_seqcount_begin(&server->roots_seqcount);
server->roots.fs_root = *fs_root;
server->roots.logs_root = *logs_root;
server->roots.srch_root = *srch_root;
write_seqcount_end(&server->roots_seqcount);
preempt_enable();
}
@@ -304,7 +308,8 @@ static void scoutfs_server_commit_func(struct work_struct *work)
}
server->prepared_commit = false;
set_roots(server, &super->fs_root, &super->logs_root);
set_roots(server, &super->fs_root, &super->logs_root,
&super->srch_root);
ret = 0;
out:
node = llist_del_all(&server->commit_waiters);
@@ -475,6 +480,7 @@ unlock:
lt.bloom_ref = ltv.bloom_ref;
lt.data_avail = ltv.data_avail;
lt.data_freed = ltv.data_freed;
lt.srch_file = ltv.srch_file;
lt.rid = key.sklt_rid;
lt.nr = key.sklt_nr;
}
@@ -537,6 +543,16 @@ static int server_commit_log_trees(struct super_block *sb,
goto unlock;
}
/* try to rotate the srch log when big enough */
mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_rotate_log(sb, &server->alloc, &server->wri,
&super->srch_root, &lt->srch_file);
mutex_unlock(&server->srch_mutex);
if (ret < 0) {
scoutfs_err(sb, "server error, rotating srch log: %d", ret);
goto unlock;
}
update_free_blocks(&super->free_meta_blocks, &ltv.meta_avail,
&lt->meta_avail);
update_free_blocks(&super->free_meta_blocks, &ltv.meta_freed,
@@ -552,6 +568,7 @@ static int server_commit_log_trees(struct super_block *sb,
ltv.bloom_ref = lt->bloom_ref;
ltv.data_avail = lt->data_avail;
ltv.data_freed = lt->data_freed;
ltv.srch_file = lt->srch_file;
ret = scoutfs_btree_update(sb, &server->alloc, &server->wri,
&super->logs_root, &key, &ltv, sizeof(ltv));
@@ -970,6 +987,112 @@ int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
NULL, NULL);
}
/*
 * Handle a client request for srch compaction work.  Under a held
 * server commit we find candidate files, move roughly 3x their blocks
 * from the server's avail allocator into the input's allocator for the
 * client to use, and record the operation.  The filled input struct is
 * returned in the net response; -ENOENT means there's nothing to do.
 */
static int server_srch_get_compact(struct super_block *sb,
struct scoutfs_net_connection *conn,
u8 cmd, u64 id, void *arg, u16 arg_len)
{
DECLARE_SERVER_INFO(sb, server);
u64 rid = scoutfs_net_client_rid(conn);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct scoutfs_srch_compact_input scin;
u64 blocks;
int ret;
int i;
/* the response struct is sent even on error, so always initialize it */
memset(&scin, 0, sizeof(scin));
scoutfs_radix_root_init(sb, &scin.meta_avail, true);
scoutfs_radix_root_init(sb, &scin.meta_freed, true);
/* this request carries no argument payload */
if (arg_len != 0) {
ret = -EINVAL;
goto out;
}
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_get_compact(sb, &server->alloc, &server->wri,
&super->srch_root, rid, &scin);
mutex_unlock(&server->srch_mutex);
/* no candidate files found; tell the caller there's nothing to do */
if (ret == 0 && scin.nr == 0)
ret = -ENOENT;
if (ret < 0)
goto apply;
/* provide ~3x input blocks to allocate, write+delete+cow */
blocks = 0;
for (i = 0; i < scin.nr; i++)
blocks += le64_to_cpu(scin.sfl[i].blocks);
blocks *= 3;
ret = scoutfs_radix_merge(sb, &server->alloc, &server->wri,
&scin.meta_avail, &server->alloc.avail,
&server->alloc.avail, true, blocks);
if (ret < 0)
goto apply;
/* record the in-flight compaction so it can be committed or canceled */
mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_update_compact(sb, &server->alloc, &server->wri,
&super->srch_root, rid, &scin);
mutex_unlock(&server->srch_mutex);
apply:
ret = scoutfs_server_apply_commit(sb, ret);
WARN_ON_ONCE(ret < 0 && ret != -ENOENT); /* XXX leaked busy item */
out:
return scoutfs_net_response(sb, conn, cmd, id, ret,
&scin, sizeof(scin));
}
/*
 * Commit the result of a client's srch compaction.  The srch core
 * resolves the busy item and hands back the operation's remaining
 * allocator roots, which we merge into the server's freed tree so
 * their blocks become reusable once this commit is stable.
 */
static int server_srch_commit_compact(struct super_block *sb,
struct scoutfs_net_connection *conn,
u8 cmd, u64 id, void *arg, u16 arg_len)
{
DECLARE_SERVER_INFO(sb, server);
u64 rid = scoutfs_net_client_rid(conn);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_super_block *super = &sbi->super;
struct scoutfs_srch_compact_result *scres;
struct scoutfs_radix_root av;
struct scoutfs_radix_root fr;
int ret;
/* the argument must be exactly one compact result struct */
scres = arg;
if (arg_len != sizeof(*scres)) {
ret = -EINVAL;
goto out;
}
ret = scoutfs_server_hold_commit(sb);
if (ret)
goto out;
mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_commit_compact(sb, &server->alloc, &server->wri,
&super->srch_root, rid, scres,
&av, &fr);
mutex_unlock(&server->srch_mutex);
if (ret < 0) /* XXX very bad, leaks allocators */
goto apply;
/* XXX like all merges, doesn't reclaim allocator blocks themselves */
/* merge the client's allocators into freed, commit before reuse */
ret = scoutfs_radix_merge(sb, &server->alloc, &server->wri,
&server->alloc.freed, &av, &av, true,
le64_to_cpu(av.ref.sm_total)) ?:
scoutfs_radix_merge(sb, &server->alloc, &server->wri,
&server->alloc.freed, &fr, &fr, true,
le64_to_cpu(fr.ref.sm_total));
apply:
ret = scoutfs_server_apply_commit(sb, ret);
out:
WARN_ON(ret < 0); /* XXX leaks allocators */
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
}
static void init_mounted_client_key(struct scoutfs_key *key, u64 rid)
{
*key = (struct scoutfs_key) {
@@ -1023,6 +1146,44 @@ static int delete_mounted_client(struct super_block *sb, u64 rid)
return ret;
}
/*
* Remove all the busy items for srch compactions that the mount might
* have been responsible for and reclaim all their allocators.
*/
static int cancel_srch_compact(struct super_block *sb, u64 rid)
{
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
struct scoutfs_radix_root av;
struct scoutfs_radix_root fr;
int ret;
/* cancel one busy compaction per pass until none remain */
for (;;) {
mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_cancel_compact(sb, &server->alloc,
&server->wri,
&super->srch_root, rid,
&av, &fr);
mutex_unlock(&server->srch_mutex);
if (ret < 0) {
/* -ENOENT means no more busy items, we're done */
if (ret == -ENOENT)
ret = 0;
break;
}
/* return the canceled compaction's allocators to the freed tree */
ret = scoutfs_radix_merge(sb, &server->alloc, &server->wri,
&server->alloc.freed, &av, &av, true,
le64_to_cpu(av.ref.sm_total)) ?:
scoutfs_radix_merge(sb, &server->alloc, &server->wri,
&server->alloc.freed, &fr, &fr, true,
le64_to_cpu(fr.ref.sm_total));
if (WARN_ON_ONCE(ret < 0))
break;
}
return ret;
}
/*
* Process an incoming greeting request in the server from the client.
* We try to send responses to failed greetings so that the sender can
@@ -1283,7 +1444,8 @@ static void farewell_worker(struct work_struct *work)
ret = scoutfs_lock_server_farewell(sb, fw->rid) ?:
remove_trans_seq(sb, fw->rid) ?:
reclaim_log_trees(sb, fw->rid) ?:
delete_mounted_client(sb, fw->rid);
delete_mounted_client(sb, fw->rid) ?:
cancel_srch_compact(sb, fw->rid);
ret = scoutfs_server_apply_commit(sb, ret);
if (ret)
@@ -1397,6 +1559,8 @@ static scoutfs_net_request_t server_req_funcs[] = {
[SCOUTFS_NET_CMD_GET_LAST_SEQ] = server_get_last_seq,
[SCOUTFS_NET_CMD_STATFS] = server_statfs,
[SCOUTFS_NET_CMD_LOCK] = server_lock,
[SCOUTFS_NET_CMD_SRCH_GET_COMPACT] = server_srch_get_compact,
[SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT] = server_srch_commit_compact,
[SCOUTFS_NET_CMD_FAREWELL] = server_farewell,
};
@@ -1483,7 +1647,8 @@ static void scoutfs_server_worker(struct work_struct *work)
if (ret < 0)
goto shutdown;
set_roots(server, &super->fs_root, &super->logs_root);
set_roots(server, &super->fs_root, &super->logs_root,
&super->srch_root);
scoutfs_radix_init_alloc(&server->alloc, &super->core_meta_avail,
&super->core_meta_freed);
scoutfs_block_writer_init(sb, &server->wri);
@@ -1623,6 +1788,7 @@ int scoutfs_server_setup(struct super_block *sb)
INIT_LIST_HEAD(&server->farewell_requests);
INIT_WORK(&server->farewell_work, farewell_worker);
mutex_init(&server->logs_mutex);
mutex_init(&server->srch_mutex);
seqcount_init(&server->roots_seqcount);
server->wq = alloc_workqueue("scoutfs_server",

1974
kmod/src/srch.c Normal file

File diff suppressed because it is too large Load Diff

69
kmod/src/srch.h Normal file
View File

@@ -0,0 +1,69 @@
#ifndef _SCOUTFS_SRCH_H_
#define _SCOUTFS_SRCH_H_
struct scoutfs_block;
/*
 * An in-memory set of search results accumulated while walking srch
 * files.  last presumably caches the most recently visited node to
 * speed up ordered insertion -- confirm against srch.c.
 */
struct scoutfs_srch_rb_root {
struct rb_root root;
struct rb_node *last;
unsigned long nr;
};
/* one result: an inode that had the searched xattr, with its entry id */
struct scoutfs_srch_rb_node {
struct rb_node node;
u64 ino;
u64 id;
};
/* walk all result nodes in sorted rbtree order */
#define scoutfs_srch_foreach_rb_node(snode, node, sroot) \
for (node = rb_first(&(sroot)->root); \
node && (snode = container_of(node, struct scoutfs_srch_rb_node, \
node), 1); \
node = rb_next(node))
/* append an entry to a log file, caching the tail block in *bl_ret */
int scoutfs_srch_add(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_srch_file *sfl,
struct scoutfs_block **bl_ret,
u64 hash, u64 ino, u64 id);
void scoutfs_srch_destroy_rb_root(struct scoutfs_srch_rb_root *sroot);
/* search all srch files for inodes with the hashed xattr name */
int scoutfs_srch_search_xattrs(struct super_block *sb,
struct scoutfs_srch_rb_root *sroot,
u64 hash, u64 ino, u64 last_ino, bool *done);
/* server: move a large-enough log file into the btree of files */
int scoutfs_srch_rotate_log(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_btree_root *root,
struct scoutfs_srch_file *sfl);
/* server: find a set of similarly-sized files for a client to compact */
int scoutfs_srch_get_compact(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_btree_root *root,
u64 rid,
struct scoutfs_srch_compact_input *scin_ret);
/* server: record an in-flight compaction as busy for the client */
int scoutfs_srch_update_compact(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_btree_root *root, u64 rid,
struct scoutfs_srch_compact_input *scin);
/* server: apply a finished compaction, returning leftover allocators */
int scoutfs_srch_commit_compact(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_btree_root *root, u64 rid,
struct scoutfs_srch_compact_result *scres,
struct scoutfs_radix_root *av,
struct scoutfs_radix_root *fr);
/* server: tear down one busy compaction for a departed mount */
int scoutfs_srch_cancel_compact(struct super_block *sb,
struct scoutfs_radix_allocator *alloc,
struct scoutfs_block_writer *wri,
struct scoutfs_btree_root *root, u64 rid,
struct scoutfs_radix_root *av,
struct scoutfs_radix_root *fr);
void scoutfs_srch_destroy(struct super_block *sb);
int scoutfs_srch_setup(struct super_block *sb);
#endif

View File

@@ -41,6 +41,7 @@
#include "sysfs.h"
#include "quorum.h"
#include "forest.h"
#include "srch.h"
#include "scoutfs_trace.h"
static struct dentry *scoutfs_debugfs_root;
@@ -178,6 +179,7 @@ static void scoutfs_put_super(struct super_block *sb)
sbi->shutdown = true;
scoutfs_data_destroy(sb);
scoutfs_srch_destroy(sb);
scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
sbi->rid_lock = NULL;
@@ -452,7 +454,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
scoutfs_client_setup(sb) ?:
scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
&sbi->rid_lock) ?:
scoutfs_trans_get_log_trees(sb);
scoutfs_trans_get_log_trees(sb) ?:
scoutfs_srch_setup(sb);
if (ret)
goto out;

View File

@@ -25,6 +25,7 @@ struct options_sb_info;
struct net_info;
struct block_info;
struct forest_info;
struct srch_info;
struct scoutfs_sb_info {
struct super_block *sb;
@@ -44,6 +45,7 @@ struct scoutfs_sb_info {
struct quorum_info *quorum_info;
struct block_info *block_info;
struct forest_info *forest_info;
struct srch_info *srch_info;
wait_queue_head_t trans_hold_wq;
struct task_struct *trans_task;