mirror of
https://github.com/versity/scoutfs.git
synced 2026-01-15 07:52:53 +00:00
Compare commits
1 commit: main...zab/coding

| Author | SHA1 | Date |
|---|---|---|
|  | 8ee41caa24 |  |

82 CodingStyle.txt Normal file
@@ -0,0 +1,82 @@

We try to maintain a consistent coding style across the project. It's
admittedly arbitrary and is based on upstream's
Documentation/CodingStyle. Conventions are added here as they come up
during review. We'll demonstrate each stylistic preference with a diff
snippet.

== Try to make one exit point for reasonably long functions

 {
-	void *a;
-	void *b;
+	void *a = NULL;
+	void *b = NULL;
+	int ret;

 	a = kalloc();
-	if (!a)
-		return 1;
+	if (!a) {
+		ret = 1;
+		goto out;
+	}

 	b = kalloc();
 	if (!b) {
-		kfree(a);
-		return 2;
+		ret = 2;
+		goto out;
 	}

-	return 3;
+	ret = 3;
+out:
+	kfree(a);
+	kfree(b);
+	return ret;
 }

The idea is to initialize all state at the top of the function, modify
it throughout, and clean it all up at the end. Having one exit point
also gives us a place to add tracing of function exits. (The
unconditional cleanup at the exit label leans on kfree() being a no-op
for NULL pointers.)

== Multiple declarations on a line

-	int i, j;
+	int i;
+	int j;

Declare function variables one per line. The verbose declarations
create pressure to think about excessive stack use or over-long
functions, make initializers clear, and leave room for comments.

== Balance braces

-	if (IS_ERR(super_block))
+	if (IS_ERR(super_block)) {
 		return PTR_ERR(super_block);
-	else {
+	} else {
 		*super_res = *super_block;
 		kfree(super_block);
 		return 0;
 	}

*nervous twitch*

== Cute variable definition waterfalls

+	struct block_device *meta_bdev;
 	struct scoutfs_sb_info *sbi;
 	struct mount_options opts;
-	struct block_device *meta_bdev;
 	struct inode *inode;

This isn't strictly necessary, but it's nice to try to make a pretty
descending distribution of variable name lengths. It often has the
accidental effect of sorting definitions by decreasing complexity. I
tend to group types when the name lengths are pretty close, even if
they're not strictly sorted, so that all the ints, u64s, keys, etc.,
are all together.
133 README.md
@@ -1,24 +1,135 @@

# Introduction

scoutfs is a clustered in-kernel Linux filesystem designed to support
large archival systems. It features additional interfaces and metadata
so that archive agents can perform their maintenance workflows without
walking all the files in the namespace. Its cluster support lets
deployments add nodes to satisfy archival tier bandwidth targets.
scoutfs is a clustered in-kernel Linux filesystem designed and built
from the ground up to support large archival systems.

The design goal is to reach file populations in the trillions, with the
archival bandwidth to match, while remaining operational and responsive.
Its key differentiating features are:

Highlights of the design and implementation include:
- Integrated consistent indexing accelerates archival maintenance operations
- Commit logs allow nodes to write concurrently without contention

It meets best of breed expectations:

* Fully consistent POSIX semantics between nodes
* Rich metadata to ensure the integrity of metadata references
* Atomic transactions to maintain consistent persistent structures
* Integrated archival metadata replaces syncing to external databases
* Dynamic separation of resources lets nodes write in parallel
* 64bit throughout; no limits on file or directory sizes or counts
* First class kernel implementation for high performance and low latency
* Open GPLv2 implementation

Learn more in the [white paper](https://docs.wixstatic.com/ugd/aaa89b_88a5cc84be0b4d1a90f60d8900834d28.pdf).

# Current Status

**Alpha Open Source Development**

scoutfs is under heavy active development. We're developing it in the
open to give the community an opportunity to affect the design and
implementation.

The core architectural design elements are in place. Much surrounding
functionality hasn't been implemented. It's appropriate for early
adopters and interested developers, not for production use.

In that vein, expect significant incompatible changes to both the format
of network messages and persistent structures. Since the format hash-checking
has now been removed in preparation for release, if there is any doubt, mkfs
is strongly recommended.

The current kernel module is developed against the RHEL/CentOS 7.x
kernel to minimize the friction of developing and testing with partners'
existing infrastructure. Once we're happy with the design we'll shift
development to the upstream kernel while maintaining distro
compatibility branches.

# Community Mailing List

Please join us on the open scoutfs-devel@scoutfs.org [mailing list
hosted on Google Groups](https://groups.google.com/a/scoutfs.org/forum/#!forum/scoutfs-devel)
for all discussion of scoutfs.

# Quick Start

**The following is a very rough example of the procedure to get up and
running; experience will be needed to fill in the gaps. We're happy to
help on the mailing list.**

The requirements for running scoutfs on a small cluster are:

1. One or more nodes running x86-64 CentOS/RHEL 7.4 (or 7.3)
2. Access to two shared block devices
3. IPv4 connectivity between the nodes

The steps for getting scoutfs mounted and operational are:

1. Get the kernel module running on the nodes
2. Make a new filesystem on the devices with the userspace utilities
3. Mount the devices on all the nodes

In this example we use three nodes. The names of the block devices are
the same on all the nodes. Two of the nodes will be quorum members. A
majority of quorum members must be mounted to elect a leader to run a
server that all the mounts connect to. It should be noted that two
quorum members results in a majority of one, each member itself, so
split brain elections are possible but so unlikely that it's fine for a
demonstration.

1. Get the Kernel Module and Userspace Binaries

* Either use snapshot RPMs built from git by Versity:

```shell
rpm -i https://scoutfs.s3-us-west-2.amazonaws.com/scoutfs-repo-0.0.1-1.el7_4.noarch.rpm
yum install scoutfs-utils kmod-scoutfs
```

* Or use the binaries built from checked out git repositories:

```shell
yum install kernel-devel
git clone git@github.com:versity/scoutfs.git
make -C scoutfs
modprobe libcrc32c
insmod scoutfs/kmod/src/scoutfs.ko
alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
```

2. Make a New Filesystem (**destroys contents**)

We specify quorum slots with the addresses of each of the quorum
member nodes, the metadata device, and the data device.

```shell
scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
```

3. Mount the Filesystem

First, mount each of the quorum nodes so that they can elect and
start a server for the remaining node to connect to. The slot numbers
were specified with the leading "0,..." and "1,..." in the mkfs options
above.

```shell
mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

Then mount the remaining node, which can now connect to the running server.

```shell
mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

4. For Kicks, Observe the Metadata Change Index

The `meta_seq` index tracks the inodes that are changed in each
transaction.

```shell
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/two; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
```

460 ReleaseNotes.md
@@ -1,460 +0,0 @@

Versity ScoutFS Release Notes
=============================

---
v1.26
\
*Nov 17, 2025*

Add the ino\_alloc\_per\_lock mount option. This changes the number of
inode numbers allocated under each cluster lock and can alleviate lock
contention for some patterns of larger file creation.

Add the tcp\_keepalive\_timeout\_ms mount option. This can enable the
system to survive longer periods of networking outages.
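
Both are ordinary mount options. As an illustration (the values below
are hypothetical; see scoutfs(5) for guidance on appropriate settings):

```shell
mount -t scoutfs \
    -o metadev_path=/dev/meta_dev,ino_alloc_per_lock=4096,tcp_keepalive_timeout_ms=30000 \
    /dev/data_dev /mnt/scoutfs
```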

Fix a rare double free of internal btree metadata blocks when merging
log trees. The duplicated freed metadata block numbers would cause
persistent errors in the server, preventing the server from starting and
hanging the system.

Fix the data\_wait interface to not require the correct data\_version of
the inode when raising an error. This lets callers raise errors when
they're unable to recall the details of the inode to discover its
data\_version.

Change scoutfs to more aggressively reclaim cached memory when under
memory pressure. This makes scoutfs behave more like other kernel
components and integrates better with the reclaim policy heuristics
in the VM core of the kernel.

Change scoutfs to more efficiently transmit and receive socket messages.
Under heavy load this can process messages quickly enough to avoid hung
task warnings for tasks that were waiting for cluster lock messages to
be processed.

Fix faulty server block commit budget calculations that were generating
spurious "holders exceeded alloc budget" console messages.

---
v1.25
\
*Jun 3, 2025*

Fix a bug that could cause indefinite retries of failed client commits.
Under specific error conditions the client's and server's understanding of
the current client commit could get out of sync. The client would
indefinitely retry commits that could never succeed. This manifested as
endless "critical transaction commit failure" messages in the kernel
log on the client and matching "error <nr> committing client logs" on
the server.

Fix a bug in a specific case of server error handling that could result
in sending references to unwritten blocks to the client. The client
would try to read blocks that hadn't been written and return spurious
errors. This was seen under low free space conditions on the server and
resulted in error messages with error code 116 (the errno value for
ESTALE, the client's indication that it couldn't read the blocks that it
expected).

---
v1.24
\
*Mar 14, 2025*

Add support for coherent read and write mmap() mappings of regular file
data between mounts.
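
As an illustration of what this enables (plain POSIX calls; the path and
length are hypothetical), a shared writable mapping on one mount is seen
coherently by mappings of the same file on other mounts:

```c
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *map;
	int fd;

	fd = open("/mnt/scoutfs/shared.dat", O_RDWR);
	if (fd < 0)
		return 1;

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map != MAP_FAILED) {
		map[0] = '!';	/* other mounts observe this write coherently */
		munmap(map, 4096);
	}
	close(fd);
	return 0;
}
```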

Fix a bug that was causing scoutfs utilities to parse and change some
file names before passing them on to the kernel for processing. This
fixes spurious scoutfs command errors for files with the offending
patterns in their names.

Fix a bug where rename wasn't updating the ctime of the inode at the
destination name if it existed.

---
v1.23
\
*Dec 11, 2024*

Add support for kernels in the RHEL 9.5 minor release.

---
v1.22
\
*Nov 1, 2024*

Add support for building against the RHEL9 family of kernels.

Fix failure of the setattr\_more ioctl() to set the attributes of a
zero-length file when restoring.

Fix support for POSIX ACLs in the RHEL8 and later family of kernels.

Fix a race condition in the lock server that could drop lock requests
under heavy load and cause cluster lock attempts to hang.

---
v1.21
\
*Jul 1, 2024*

This release adds features that rely on incompatible changes to the
structure of the file system. The process of advancing the format version
to enable these features is described in scoutfs(5).

Added the ".indx." extended attribute tag which can be used to determine
the sorting of files in a global index.

Added ScoutFS quotas which let rules define file size and count limits
in terms of ".totl." extended attribute totals.
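
As a sketch of the moving parts: quota totals are fed by specially tagged
xattrs on the files they count. The tag name below is made up for
illustration; the actual ".totl." name format is documented in scoutfs(5):

```shell
# hypothetical total tag "7.0.0" that a quota rule could reference
setfattr -n scoutfs.hide.totl.7.0.0 -v 1 /mnt/scoutfs/projectfile
getfattr -d -m scoutfs /mnt/scoutfs/projectfile
```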

Added the project ID file attribute which is inherited from parent
directories on creation. ScoutFS quota rules can reference project IDs.

Add a retention attribute for files which prevents modification once
enabled.

---
v1.20
\
*Apr 22, 2024*

Minor changes to packaging to better support "weak" module linking of
the kernel module, and to include git hashes in the built package. No
changes in runtime behaviour.

---
v1.19
\
*Jan 30, 2024*

Added the log\_merge\_wait\_timeout\_ms mount option to set the timeout
for creating log merge operations. The previous timeout, now the
default, was too short for some systems and was resulting in consistent
timeouts which created an excessive number of log trees waiting to be
merged.

Improved performance of many in-mount server operations when there are a
large number of log trees waiting to be merged.

---
v1.18
\
*Nov 7, 2023*

Fixed a bug where background srch file compaction could stop making
forward progress if a partial compaction operation was committed at a
specific byte offset in a block. This would cause srch file searches to
be progressively more expensive over time. Once this fix is running,
background compaction will resume, bringing the cost of searches back
down.

---
v1.17
\
*Oct 23, 2023*

Add support for EL8 generation kernels.

---
v1.16
\
*Oct 4, 2023*

Fix an issue where the server could hang on startup if its persistent
allocator structures were left in a specific degraded state by the
previously active server.

---
v1.15
\
*Jul 17, 2023*

Process log btree merge splicing in multiple commits. This prevents a
rare case where pending log merge completions contain more work than can
be done in a single server commit, causing the server to trigger an
assert shortly after starting.

Fix spurious EINVAL from data writes when data\_prealloc\_contig\_only was
set to 0.

---
v1.14
\
*Jun 29, 2023*

Add get\_referring\_entries ioctl for getting directory entries that
refer to an inode.

Fix excessive CPU use in the move\_blocks interface when moving a large
number of extents.

Reduce fragmented data allocation when contig\_only prealloc is not in
use by more consistently allocating multi-block extents within each
aligned prealloc region.

Avoid rare deadlock in metadata block cache reclaim under both heavy
load and memory pressure.

Fix crash when using quorum\_heartbeat\_timeout\_ms mount option.

---
v1.13
\
*May 19, 2023*

Add the quorum\_heartbeat\_timeout\_ms mount option to set the quorum
heartbeat timeout.

Change some task prioritization and allocation behavior of the quorum
agent to help reduce delays in sending and receiving heartbeat messages.

---
v1.12
\
*Apr 17, 2023*

Add the prepare-empty-data-device scoutfs command. A data device can be
unused when no files have data blocks, perhaps because they're archived
and offline. In this case the data device can be swapped out for
another device without changes to the metadata device.

Fix an oversight which limited inode timestamps to second granularity
for some operations. All operations now record timestamps with full
nanosecond precision.

Fix spurious ENOENT failures when renaming from other directories into
the root directory.

---
v1.11
\
*Feb 2, 2023*

Fixed a free extent processing error that could prevent mount from
proceeding when free data extents were sufficiently fragmented. It now
properly handles very fragmented free extent maps.

Fixed a statfs server processing race that could return spurious errors
and shut down the server. With the race closed, statfs processing is
reliable.

Fixed a rare livelock in the move\_blocks ioctl. With the right
relationship between ioctl arguments and eventual file extent items, the
core loop in the move\_blocks ioctl could get stuck looping on an extent
item and never return. The loop exit conditions were fixed and the loop
will always advance through all extents.

Changed the 'print' scoutfs commands to flush the block cache for the
devices. It was inconvenient to expect cache flushing to be a separate
step to ensure consistency with remote node writes.

---
v1.10
\
*Dec 7, 2022*

Fixed a potential directory entry cache management deadlock that could
occur when many nodes performed heavy metadata write loads across shared
directories and their child subdirectories. The deadlock could halt
invalidation progress on a node, which could then stop use of locks that
needed invalidation on that node, leaving almost all tasks hanging on
locks that would never make progress.

Fixed a circumstance where metadata change sequence index item
modification could leave behind old stale metadata sequence items. The
duplication case required concurrent metadata updates across mounts with
particular open transaction patterns, so the duplicate items are rare.
They resulted in a small amount of additional load when walking change
indexes but had no effect on correctness.

Fixed a rare case, found in testing, where sparse file extension might
not write partial blocks of zeros. This required using truncate to
extend files past file sizes that end in partial blocks, along with the
right transaction commit and memory reclaim patterns. This never
affected regular non-sparse files nor files prepopulated with fallocate.

---
v1.9
\
*Oct 29, 2022*

Fix VFS cached directory entry consistency verification that could cause
spurious "no such file or directory" (ENOENT) errors from rename over
NFS under certain conditions. The problem was only ever with the
consistency of in-memory cached dentry objects; persistent data was
correct, and eventual eviction of the bad cached objects would stop
generating the errors.

---
v1.8
\
*Oct 18, 2022*

Add support for Linux POSIX Access Control Lists, as described in
acl(5). Mount options are added to enable ("acl") and disable ("noacl")
support. The default is to support ACLs. ACLs are stored in the
existing extended attribute scheme, so adding support does not require
a format change.
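
A minimal sketch of toggling the feature at mount time, using the option
names above (device paths are illustrative):

```shell
mount -t scoutfs -o noacl,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```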

Add options to control data extent preallocation. The default behavior
does not change. The options can relax the limits on preallocation,
which will then trigger under more write patterns and increase the risk
of preallocated space which is never used. The options are described in
scoutfs(5).

---
v1.7
\
*Aug 26, 2022*

* **Fixed possible persistent errors moving freed data extents**
  \
  Fixed a case where the server could hit persistent errors trying to
  move a client's freed extents in one commit. The client had to free
  a large number of extents that occupied distant positions in the
  global free extent btree. Very large fragmented files could cause
  this. The server now moves the freed extents in multiple commits and
  can always ensure forward progress.

* **Fixed possible persistent errors from freed duplicate extents**
  \
  Background orphan deletion wasn't properly synchronizing with
  foreground tasks deleting very large files. If a deletion took long
  enough then background deletion could also attempt to delete inode
  items while the deletion was making progress. This could create
  duplicate deletions of data extent items, which caused the server to
  abort when it later discovered the duplicate extents as it merged
  free lists.

---
v1.6
\
*Jul 7, 2022*

* **Fix memory leaks in rare corner cases**
  \
  Analysis tools found a few corner cases that leaked small structures,
  generally around error handling or startup and shutdown.

* **Add --skip-likely-huge scoutfs print command option**
  \
  Add an option to scoutfs print to reduce the size of the output
  so that it can be used to see system-wide metadata without being
  overwhelmed by file-level details.

---
v1.5
\
*Jun 21, 2022*

* **Fix persistent error during server startup**
  \
  Fixed a case where the server would consistently hit an error on
  startup, preventing the system from mounting. This required a rare
  but valid state across the clients.

* **Fix a client hang that would lead to fencing**
  \
  The client module's use of in-kernel networking was missing an
  annotation, which could lead to communication hanging. The server
  would fence the client when it stopped communicating. This could be
  identified by the server fencing a client after it disconnected with
  no attempt by the client to reconnect.

---
v1.4
\
*May 6, 2022*

* **Fix possible client crash during server failover**
  \
  Fixed a narrow window during server failover and lock recovery that
  could cause a client mount to believe that it had an inconsistent item
  cache and panic. This required very specific lock state and messaging
  patterns between multiple mounts and multiple servers, which made it
  unlikely to occur in the field.

---
v1.3
\
*Apr 7, 2022*

* **Fix rare server instability under heavy load**
  \
  Fixed a case of server instability under heavy load due to concurrent
  work fully exhausting metadata block allocation pools reserved for a
  single server transaction. This would cause a brief interruption as
  the server shut down and the next server started up and made progress
  as pending work was retried.

* **Fix slow fencing preventing server startup**
  \
  If a server had to process many fence requests with a slow fencing
  mechanism it could be interrupted before it finished. The server
  now makes sure heartbeat messages are sent while it is making progress
  on fencing requests so that other quorum members don't interrupt the
  process.

* **Performance improvement in getxattr and setxattr**
  \
  Kernel allocation patterns in the getxattr and setxattr
  implementations were causing significant contention between CPUs.
  Their allocation strategy was changed so that concurrent tasks can
  call these xattr methods without degrading performance.

---
v1.2
\
*Mar 14, 2022*

* **Fix deadlock between fallocate() and read() system calls**
  \
  Fixed a lock inversion that could cause two tasks to deadlock if they
  performed fallocate() and read() on a file at the same time. The
  deadlock was uninterruptible, so the machine needed to be rebooted.
  This was relatively rare as fallocate() is usually used to prepare
  files before they're used.

* **Fix instability from heavy file deletion workloads**
  \
  Fixed rare circumstances under which background file deletion cleanup
  tasks could try to delete a file while it was being deleted by another
  task. Heavy load across multiple nodes, either many files being
  deleted or large files being deleted, increased the chances of this
  happening. Heavy staging could cause this problem because staging can
  create many internal temporary files that need to be deleted.

---
v1.1
\
*Feb 4, 2022*

* **Add scoutfs(1) change-quorum-config command**
  \
  Add a change-quorum-config command to scoutfs(1) to change the quorum
  configuration stored in the metadata device while the file system is
  unmounted. This can be used to change the mounts that will
  participate in quorum and the IP addresses they use.

* **Fix Rare Risk of Item Cache Corruption**
  \
  Code review found a rare potential source of item cache corruption.
  If this happened it would look as though deleted parts of the
  filesystem returned, as they were at the time they were deleted. Old
  deleted items are not affected. This problem only affected the item
  cache, never persistent storage. Unmounting and remounting would drop
  the bad item cache and resync it with the correct persistent data.

---
v1.0
\
*Nov 8, 2021*

* **Initial Release**
  \
  Version 1.0 marks the first GA release.
@@ -5,22 +5,24 @@ ifeq ($(SK_KSRC),)

SK_KSRC := $(shell echo /lib/modules/`uname -r`/build)
endif

SCOUTFS_GIT_DESCRIBE ?= \
# fail if sparse fails, if we find it
ifeq ($(shell sparse && echo found),found)
SP =
else
SP = @:
endif

SCOUTFS_GIT_DESCRIBE := \
	$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
	echo no-git)

ESCAPED_GIT_DESCRIBE := \
	$(shell echo $(SCOUTFS_GIT_DESCRIBE) |sed -e 's/\//\\\//g')

RPM_GITHASH ?= $(shell git rev-parse --short HEAD)

SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
	RPM_GITHASH=$(RPM_GITHASH) \
	CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
	EXTRA_CFLAGS="-Werror"

# - We use the git describe from tags to set up the RPM versioning
RPM_VERSION := $(shell git describe --long --tags | awk -F '-' '{gsub(/^v/,""); print $$1}')
RPM_GITHASH := $(shell git rev-parse --short HEAD)
TARFILE = scoutfs-kmod-$(RPM_VERSION).tar
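
As a sketch of how RPM_VERSION falls out of the describe output (the tag
and hash below are made up):

```shell
$ git describe --long --tags
v1.2-3-g8ee41c
# awk splits on "-", strips the leading "v", and prints the first field:
# RPM_VERSION = 1.2, so TARFILE = scoutfs-kmod-1.2.tar
```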
@@ -29,16 +31,17 @@ TARFILE = scoutfs-kmod-$(RPM_VERSION).tar

all: module

module:
	$(MAKE) CHECK=$(CURDIR)/src/sparse-filtered.sh C=1 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)
	make $(SCOUTFS_ARGS)
	$(SP) make C=2 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)


modules_install:
	$(MAKE) $(SCOUTFS_ARGS) modules_install
	make $(SCOUTFS_ARGS) modules_install


%.spec: %.spec.in .FORCE
	sed -e 's/@@VERSION@@/$(RPM_VERSION)/g' \
	    -e 's/@@GITHASH@@/$(RPM_GITHASH)/g' \
	    -e 's/@@GITDESCRIBE@@/$(ESCAPED_GIT_DESCRIBE)/g' < $< > $@+
	    -e 's/@@GITHASH@@/$(RPM_GITHASH)/g' < $< > $@+
	mv $@+ $@

@@ -47,4 +50,4 @@ dist: scoutfs-kmod.spec

	@ tar rf $(TARFILE) --transform="s@\(.*\)@scoutfs-kmod-$(RPM_VERSION)/\1@" scoutfs-kmod.spec

clean:
	$(MAKE) $(SCOUTFS_ARGS) clean
	make $(SCOUTFS_ARGS) clean

@@ -1,26 +1,18 @@

%define kmod_name scoutfs
%define kmod_version @@VERSION@@
%define kmod_git_hash @@GITHASH@@
%define kmod_git_describe @@GITDESCRIBE@@
%define pkg_date %(date +%%Y%%m%%d)

# take kernel version or default to uname -r
%{!?kversion: %global kversion %(uname -r)}
%global kernel_version %{kversion}

%if 0%{?el7}
%global kernel_source() /usr/src/kernels/%{kernel_version}.$(arch)
%else
%global kernel_source() /usr/src/kernels/%{kernel_version}
%endif
%global kernel_release() %{kversion}

%{!?_release: %global _release 0.%{pkg_date}git%{kmod_git_hash}}

%if 0%{?el7}
Name: %{kmod_name}
%else
Name: kmod-%{kmod_name}
%endif
Summary: %{kmod_name} kernel module
Version: %{kmod_version}
Release: %{_release}%{?dist}
@@ -28,42 +20,24 @@ License: GPLv2

Group: System/Kernel
URL: http://scoutfs.org/

%if 0%{?el7}
BuildRequires: %{kernel_module_package_buildreqs}
%else
BuildRequires: elfutils-libelf-devel
%endif
BuildRequires: kernel-devel-uname-r = %{kernel_version}
BuildRequires: git
BuildRequires: kernel-devel-uname-r = %{kernel_version}
BuildRequires: module-init-tools

ExclusiveArch: x86_64

Source: %{kmod_name}-kmod-%{kmod_version}.tar

%if 0%{?el7}
# Build only for standard kernel variant(s); for debug packages, append "debug"
# after "default" (separated by space)
%kernel_module_package default
%endif

%global install_mod_dir extra/%{kmod_name}
# Disable the building of the debug package(s).
%define debug_package %{nil}

%if ! 0%{?el7}
%global flavors_to_build x86_64
%endif
%global install_mod_dir extra/%{name}

# el9 sanity: make sure we lock to the minor release we built for and block upgrades
%{lua:
if string.match(rpm.expand("%{dist}"), "%.el9") then
	rpm.define("el9 1")
end
}

%if 0%{?el9}
%define release_major_minor 9.%{lua: print(rpm.expand("%{dist}"):match("%.el9_(%d)"))}
Requires: system-release = %{release_major_minor}
%endif

%description
%{kmod_name} - kernel module
@@ -83,7 +57,7 @@ echo "Building for kernel: %{kernel_version} flavors: '%{flavors_to_build}'"

for flavor in %flavors_to_build; do
	rm -rf obj/$flavor
	cp -r source obj/$flavor
	make RPM_GITHASH=%{kmod_git_hash} SCOUTFS_GIT_DESCRIBE=%{kmod_git_describe} SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
	make SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
done

%install
@@ -92,7 +66,7 @@ export INSTALL_MOD_DIR=%{install_mod_dir}

mkdir -p %{install_mod_dir}
for flavor in %{flavors_to_build}; do
	export KSRC=%{kernel_source $flavor}
	export KVERSION=%{kversion}
	export KVERSION=%{kernel_release $KSRC}
	install -d $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}
	cp $PWD/obj/$flavor/src/scoutfs.ko $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}/
done
@@ -100,23 +74,7 @@ done

# mark modules executable so that strip-to-file can strip them
find %{buildroot} -type f -name \*.ko -exec %{__chmod} u+x \{\} \;

%if ! 0%{?el7}
%files
/lib/modules

%post
echo /lib/modules/%{kversion}/%{install_mod_dir}/scoutfs.ko | weak-modules --add-modules --no-initramfs
depmod -a
%endif

%clean
rm -rf %{buildroot}

%preun
# stash our modules for postun cleanup
SCOUTFS_RPM_NAME=$(rpm -q %{name} | grep "%{version}-%{release}")
rpm -ql $SCOUTFS_RPM_NAME | grep '\.ko$' > /var/run/%{name}-modules-%{version}-%{release} || true

%postun
cat /var/run/%{name}-modules-%{version}-%{release} | weak-modules --remove-modules --no-initramfs
rm /var/run/%{name}-modules-%{version}-%{release} || true

@@ -8,8 +8,6 @@ CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include

-include $(src)/Makefile.kernelcompat

scoutfs-y += \
	acl.o \
	attr_x.o \
	avl.o \
	alloc.o \
	block.o \
@@ -20,23 +18,18 @@ scoutfs-y += \

	dir.o \
	export.o \
	ext.o \
	fence.o \
	file.o \
	forest.o \
	inode.o \
	ioctl.o \
	item.o \
	kernelcompat.o \
	lock.o \
	lock_server.o \
	msg.o \
	net.o \
	omap.o \
	options.o \
	per_task.o \
	quorum.o \
	quota.o \
	recov.o \
	scoutfs_trace.o \
	server.o \
	sort_priv.o \
@@ -44,12 +37,9 @@ scoutfs-y += \

	srch.o \
	super.o \
	sysfs.o \
	totl.o \
	trans.o \
	triggers.o \
	tseq.o \
	volopt.o \
	wkic.o \
	xattr.o

#

@@ -7,13 +7,23 @@

ccflags-y += -include $(src)/kernelcompat.h

#
# v3.18-rc2-19-gb5ae6b15bd73
#
# Folds d_materialise_unique into d_splice_alias. Note reversal
# of arguments (Also note Documentation/filesystems/porting.rst)
# v3.10-rc6-21-gbb6f619b3a49
#
ifneq (,$(shell grep 'd_materialise_unique' include/linux/dcache.h))
ccflags-y += -DKC_D_MATERIALISE_UNIQUE=1
# _readdir changes from fop->readdir() to fop->iterate() and from
# filldir(dirent) to dir_emit(ctx).
#
ifneq (,$(shell grep 'iterate.*dir_context' include/linux/fs.h))
ccflags-y += -DKC_ITERATE_DIR_CONTEXT
endif

#
# v3.10-rc6-23-g5f99f4e79abc
#
# Helpers including dir_emit_dots() are added in the process of
# switching dcache_readdir() from fop->readdir() to fop->iterate()
#
ifneq (,$(shell grep 'dir_emit_dots' include/linux/fs.h))
ccflags-y += -DKC_DIR_EMIT_DOTS
endif

#
@@ -24,465 +34,3 @@ endif
ifneq (,$(shell grep 'FMODE_KABI_ITERATE' include/linux/fs.h))
ccflags-y += -DKC_FMODE_KABI_ITERATE
endif

#
# v4.7-rc2-23-g0d4d717f2583
#
# Added user_ns argument to posix_acl_valid
#
ifneq (,$(shell grep 'posix_acl_valid.*user_namespace' include/linux/posix_acl.h))
ccflags-y += -DKC_POSIX_ACL_VALID_USER_NS
endif

#
# v5.3-12296-g6d2052d188d9
#
# The RBCOMPUTE function is now passed an extra flag, and should return a bool
# to indicate whether the propagated callback should stop or not.
#
ifneq (,$(shell grep 'static inline bool RBNAME.*_compute_max' include/linux/rbtree_augmented.h))
ccflags-y += -DKC_RB_TREE_AUGMENTED_COMPUTE_MAX
endif

#
# v3.13-25-g37bc15392a23
#
# Renames posix_acl_create to __posix_acl_create and provides some
# new interfaces for creating ACLs
#
ifneq (,$(shell grep '__posix_acl_create' include/linux/posix_acl.h))
ccflags-y += -DKC___POSIX_ACL_CREATE
endif

#
# v4.8-rc1-29-g31051c85b5e2
#
# inode_change_ok() removed - replace with setattr_prepare()
# v5.11-rc4-7-g2f221d6f7b88 removes extern attribute
#
ifneq (,$(shell grep 'int setattr_prepare' include/linux/fs.h))
ccflags-y += -DKC_SETATTR_PREPARE
endif
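
These KC_ flags let one source tree span kernel API changes. A minimal
sketch of how a flag like KC_SETATTR_PREPARE might be consumed (purely
illustrative; the real wiring lives in kernelcompat.h and its callers):

```c
/* hypothetical caller: pick whichever permission-check helper exists */
static int kc_check_setattr(struct dentry *dentry, struct iattr *attr)
{
#ifdef KC_SETATTR_PREPARE
	return setattr_prepare(dentry, attr);		/* newer kernels */
#else
	return inode_change_ok(dentry->d_inode, attr);	/* older kernels */
#endif
}
```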

#
# v4.15-rc3-4-gae5e165d855d
#
# linux/iversion.h needs to manually be included for code that
# manipulates this field.
#
ifneq (,$(shell grep -s 'define _LINUX_IVERSION_H' include/linux/iversion.h))
ccflags-y += -DKC_NEED_LINUX_IVERSION_H=1
endif

# v4.11-12447-g104b4e5139fe
#
# Renamed __percpu_counter_add to percpu_counter_add_batch to clarify
# that the __ wasn't less safe, just took an extra parameter.
#
ifneq (,$(shell grep 'percpu_counter_add_batch' include/linux/percpu_counter.h))
ccflags-y += -DKC_PERCPU_COUNTER_ADD_BATCH
endif

#
# v4.11-4550-g7dea19f9ee63
#
# Introduced memalloc_nofs_{save,restore} preferred instead of _noio_.
#
ifneq (,$(shell grep 'memalloc_nofs_save' include/linux/sched/mm.h))
ccflags-y += -DKC_MEMALLOC_NOFS_SAVE
endif

#
# v4.7-12414-g1eff9d322a44
#
# Renamed bi_rw to bi_opf to force old code to catch up. We use it as a
# single switch between old and new bio structures.
#
ifneq (,$(shell grep 'bi_opf' include/linux/blk_types.h))
ccflags-y += -DKC_BIO_BI_OPF
endif

#
# v4.12-rc2-201-g4e4cbee93d56
#
# Moves to bi_status BLK_STS_ API instead of having a mix of error
# end_io args or bi_error.
#
ifneq (,$(shell grep 'bi_status' include/linux/blk_types.h))
ccflags-y += -DKC_BIO_BI_STATUS
endif

#
# v3.11-8765-ga0b02131c5fc
#
# Remove the old ->shrink() API, ->{scan,count}_objects is preferred.
#
ifneq (,$(shell grep '(*shrink)' include/linux/shrinker.h))
ccflags-y += -DKC_SHRINKER_SHRINK
endif

#
# v3.19-4777-g6bec00352861
#
# backing_dev_info is removed from address_space. Instead we need to use
# inode_to_bdi() inline from <backing-dev.h>.
#
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info' include/linux/fs.h))
ccflags-y += -DKC_LINUX_BACKING_DEV_INFO=1
endif

#
# v4.3-9290-ge409de992e3e
#
# xattr handlers are now passed a struct that contains `flags`
#
ifneq (,$(shell grep 'int...get..const struct xattr_handler.*struct dentry.*dentry,' include/linux/xattr.h))
ccflags-y += -DKC_XATTR_STRUCT_XATTR_HANDLER=1
endif

#
# v4.16-rc1-1-g9b2c45d479d0
#
# kernel_getsockname() and kernel_getpeername dropped addrlen arg
#
ifneq (,$(shell grep 'kernel_getsockname.*,$$' include/linux/net.h))
ccflags-y += -DKC_KERNEL_GETSOCKNAME_ADDRLEN=1
endif

#
# v4.1-rc1-410-geeb1bd5c40ed
#
# Adds a struct net parameter to sock_create_kern
#
ifneq (,$(shell grep 'sock_create_kern.*struct net' include/linux/net.h))
ccflags-y += -DKC_SOCK_CREATE_KERN_NET=1
endif

#
# v4.17-rc6-7-g95582b008388
#
# Kernel has current_time(inode) to uniformly retrieve timespec in the right unit
#
ifneq (,$(shell grep 'struct timespec64 current_time' include/linux/fs.h))
ccflags-y += -DKC_CURRENT_TIME_INODE=1
endif

#
# v4.9-12228-g530e9b76ae8f
#
# register_cpu_notifier and family were all removed and to be
# replaced with cpuhp_* API calls.
#
ifneq (,$(shell grep 'define register_hotcpu_notifier' include/linux/cpu.h))
ccflags-y += -DKC_CPU_NOTIFIER
endif

#
# v3.14-rc8-130-gccad2365668f
#
# generic_file_buffered_write is removed, backport it
#
ifneq (,$(shell grep 'extern ssize_t generic_file_buffered_write' include/linux/fs.h))
ccflags-y += -DKC_GENERIC_FILE_BUFFERED_WRITE=1
endif

#
# v5.7-438-g8151b4c8bee4
#
# struct address_space_operations switches away from .readpages to .readahead
#
# RHEL has backported this feature all the way to RHEL8, as part of RHEL_KABI,
# which means we need to detect this very precisely
#
ifneq (,$(shell grep 'readahead.*struct readahead_control' include/linux/fs.h))
ccflags-y += -DKC_FILE_AOPS_READAHEAD
endif

#
# v4.0-rc7-1743-g8436318205b9
#
# .aio_read and .aio_write no longer exist. All reads and writes now use the
# .read_iter and .write_iter methods, or must implement .read and .write (which
# we don't).
#
ifneq (,$(shell grep 'ssize_t.*aio_read' include/linux/fs.h))
ccflags-y += -DKC_LINUX_HAVE_FOP_AIO_READ=1
endif

#
# rhel7 has a custom inode_operations_wrapper struct that is discarded
# entirely in favor of upstream structure since rhel8.
#
ifneq (,$(shell grep 'void.*follow_link.*struct dentry' include/linux/fs.h))
ccflags-y += -DKC_LINUX_HAVE_RHEL_IOPS_WRAPPER=1
endif

ifneq (,$(shell grep 'size_t.*ki_left;' include/linux/aio.h))
ccflags-y += -DKC_LINUX_AIO_KI_LEFT=1
endif

#
# v4.4-rc4-4-g98e9cb5711c6
#
# Introduces a new xattr_handler .name member that can be used to match the
# entire field, instead of just a prefix. For these kernels, we must use
# the new .name field instead.
ifneq (,$(shell grep 'static inline const char .xattr_prefix' include/linux/xattr.h))
ccflags-y += -DKC_XATTR_HANDLER_NAME=1
endif

#
# v5.19-rc4-96-g342a72a33407
#
# Adds `typedef __u32 __bitwise blk_opf_t` to aid flag checking
ifneq (,$(shell grep 'typedef __u32 __bitwise blk_opf_t' include/linux/blk_types.h))
ccflags-y += -DKC_HAVE_BLK_OPF_T=1
endif

#
# v5.12-rc6-9-g4f0f586bf0c8
#
# list_sort cmp function takes const list_head args
ifneq (,$(shell grep 'const struct list_head ., const struct list_head .' include/linux/list_sort.h))
ccflags-y += -DKC_LIST_CMP_CONST_ARG_LIST_HEAD
endif

# v5.7-523-g88dca4ca5a93
#
# The pgprot argument to vmalloc is always PAGE_KERNEL, so it is removed.
ifneq (,$(shell grep 'extern void .__vmalloc.unsigned long size, gfp_t gfp_mask, pgprot_t prot' include/linux/vmalloc.h))
ccflags-y += -DKC_VMALLOC_PGPROT_T
endif

# v6.2-rc1-18-g01beba7957a2
#
# fs: port inode_owner_or_capable() to mnt_idmap
ifneq (,$(shell grep 'bool inode_owner_or_capable.struct user_namespace .mnt_userns' include/linux/fs.h))
ccflags-y += -DKC_INODE_OWNER_OR_CAPABLE_USERNS
endif

#
# v5.11-rc4-5-g47291baa8ddf
#
# namei: make permission helpers idmapped mount aware
ifneq (,$(shell grep 'int inode_permission.struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_INODE_PERMISSION_USERNS
endif

#
# v5.11-rc4-24-g549c7297717c
#
# fs: make helpers idmap mount aware
# Enlarges the VFS API methods to include user namespace argument.
ifneq (,$(shell grep 'int ..mknod. .struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_VFS_METHOD_USER_NAMESPACE_ARG
endif

#
# v6.2-rc1-2-gabf08576afe3
#
# fs: vfs methods use struct mnt_idmap instead of struct user_namespace
ifneq (,$(shell grep 'int vfs_mknod.struct mnt_idmap' include/linux/fs.h))
ccflags-y += -DKC_VFS_METHOD_MNT_IDMAP_ARG
endif

#
# v5.17-rc2-21-g07888c665b40
#
# Detect new style bio_alloc - pass bdev and opf.
ifneq (,$(shell grep 'struct bio .bio_alloc.struct block_device .bdev' include/linux/bio.h))
ccflags-y += -DKC_BIO_ALLOC_DEV_OPF_ARGS
endif

#
# v5.7-rc4-53-gcddf8a2c4a82
#
# fiemap_prep() replaces fiemap_check_flags()
ifneq (,$(shell grep -s 'int fiemap_prep.struct inode' include/linux/fiemap.h))
ccflags-y += -DKC_FIEMAP_PREP
endif

#
# v5.17-13043-g800ba29547e1
#
# generic_perform_write args use kiocb for passing filp and pos
ifneq (,$(shell grep 'ssize_t generic_perform_write.struct kiocb ., struct iov_iter' include/linux/fs.h))
ccflags-y += -DKC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
endif

#
# v5.7-rc6-2496-g76ee0785f42a
#
# net: add sock_set_sndtimeo
ifneq (,$(shell grep 'void sock_set_sndtimeo.struct sock' include/net/sock.h))
ccflags-y += -DKC_SOCK_SET_SNDTIMEO
endif

#
# v5.8-rc4-1931-gba423fdaa589
#
# setsockopt functions are now passed a sockptr_t value instead of char*
ifneq (,$(shell grep -s 'include .linux/sockptr.h.' include/linux/net.h))
ccflags-y += -DKC_SETSOCKOPT_SOCKPTR_T
endif

#
# v5.7-rc6-2507-g71c48eb81c9e
#
# Adds a bunch of low level TCP sock parameter functions that we want to use.
ifneq (,$(shell grep 'int tcp_sock_set_keepintvl' include/linux/tcp.h))
ccflags-y += -DKC_HAVE_TCP_SET_SOCKFN
endif

#
# v4.16-rc3-13-ga84d1169164b
#
# Fixes y2038 issues with struct timeval.
ifneq (,$(shell grep -s '^struct __kernel_old_timeval .' include/uapi/linux/time_types.h))
ccflags-y += -DKC_KERNEL_OLD_TIMEVAL_STRUCT
endif

#
# v5.19-rc4-52-ge33c267ab70d
#
# register_shrinker now requires a name, used for debug stats etc.
ifneq (,$(shell grep 'int __printf.*register_shrinker.struct shrinker .shrinker,' include/linux/shrinker.h))
ccflags-y += -DKC_SHRINKER_NAME
endif

#
# v5.18-rc5-246-gf132ab7d3ab0
#
# mpage_readpage() is now replaced with mpage_read_folio.
ifneq (,$(shell grep 'int mpage_read_folio.struct folio .folio' include/linux/mpage.h))
ccflags-y += -DKC_MPAGE_READ_FOLIO
endif

#
# v5.18-rc5-219-gb3992d1e2ebc
#
# block_write_begin() is no longer passed aop_flags
ifneq (,$(shell grep -C1 'int block_write_begin' include/linux/buffer_head.h | tail -n 2 | grep 'unsigned flags'))
ccflags-y += -DKC_BLOCK_WRITE_BEGIN_AOP_FLAGS
endif

#
# v6.0-rc6-9-g863f144f12ad
#
# the .tmpfile() vfs method calling convention changed and now a struct
# file* is passed to this method instead of a dentry. The function also
# should open the created file and call finish_open_simple() before returning.
ifneq (,$(shell grep 'extern void d_tmpfile.struct dentry' include/linux/dcache.h))
ccflags-y += -DKC_D_TMPFILE_DENTRY
endif

#
# v6.4-rc2-201-g0733ad800291
#
# New blk_mode_t replaces abuse of fmode_t
ifneq (,$(shell grep 'typedef unsigned int __bitwise blk_mode_t' include/linux/blkdev.h))
ccflags-y += -DKC_HAVE_BLK_MODE_T
endif

#
# v6.4-rc2-186-g2736e8eeb0cc
#
# Reworks FMODE_EXCL kludge and instead modifies the blkdev_put() call to pass in
# the (exclusive) holder to implement FMODE_EXCL handling.
ifneq (,$(shell grep 'blkdev_put.struct block_device .bdev, void .holder' include/linux/blkdev.h))
ccflags-y += -DKC_BLKDEV_PUT_HOLDER_ARG
endif

#
# v6.4-rc4-163-g0d625446d0a4
#
# Entirely removes current->backing_dev_info to ultimately remove buffer_head
# completely at some point.
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info;' include/linux/sched.h))
ccflags-y += -DKC_CURRENT_BACKING_DEV_INFO
endif

#
# v6.8-rc1-4-gf3a608827d1f
#
# adds bdev_file_open_by_path() and later in v6.8-rc1-30-ge97d06a46526 removes bdev_open_by_path()
# which requires us to use the file method from now on.
ifneq (,$(shell grep 'struct file.*bdev_file_open_by_path.const char.*path' include/linux/blkdev.h))
ccflags-y += -DKC_BDEV_FILE_OPEN_BY_PATH
endif

# v4.0-rc7-1796-gfe0f07d08ee3
#
# direct-io changes modify inode_dio_done to now be called inode_dio_end
ifneq (,$(shell grep 'void inode_dio_end.struct inode' include/linux/fs.h))
ccflags-y += -DKC_INODE_DIO_END
endif

#
# v5.0-6476-g3d3539018d2c
#
# page fault handlers return a bitmask vm_fault_t instead
# Note: el8's header has a slightly modified prefix here
ifneq (,$(shell grep 'typedef.*__bitwise unsigned.*int vm_fault_t' include/linux/mm_types.h))
ccflags-y += -DKC_MM_VM_FAULT_T
endif

# v3.19-499-gd83a08db5ba6
#
# .remap_pages becomes obsolete
ifneq (,$(shell grep 'int ..remap_pages..struct vm_area_struct' include/linux/mm.h))
ccflags-y += -DKC_MM_REMAP_PAGES
endif

#
# v3.19-4742-g503c358cf192
#
# list_lru_shrink_count() and list_lru_shrink_walk() introduced
#
ifneq (,$(shell grep 'list_lru_shrink_count.*struct list_lru' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_SHRINK_COUNT_WALK
endif

#
# v3.19-4757-g3f97b163207c
#
# lru_list_walk_cb lru arg added
#
ifneq (,$(shell grep 'struct list_head \*item, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_WALK_CB_ITEM_LOCK
endif

#
# v6.7-rc4-153-g0a97c01cd20b
#
# list_lru_{add,del} -> list_lru_{add,del}_obj
#
ifneq (,$(shell grep '^bool list_lru_add_obj' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_ADD_OBJ
endif

#
# v6.12-rc6-227-gda0c02516c50
#
# lru_list_walk_cb lock arg removed
#
ifneq (,$(shell grep 'struct list_lru_one \*list, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_WALK_CB_LIST_LOCK
endif

#
# v5.1-rc4-273-ge9b98e162aa5
#
# introduce stack trace helpers
#
ifneq (,$(shell grep '^unsigned int stack_trace_save' include/linux/stacktrace.h))
ccflags-y += -DKC_STACK_TRACE_SAVE
endif

# v6.1-rc1-4-g7420332a6ff4
#
# .get_acl() method now has dentry arg (and mnt_idmap). The old get_acl has been renamed
# to get_inode_acl() and is still available as well, but has an extra rcu param.
ifneq (,$(shell grep 'struct posix_acl ...get_acl..struct mnt_idmap ., struct dentry' include/linux/fs.h))
ccflags-y += -DKC_GET_ACL_DENTRY
endif

400 kmod/src/acl.c
@@ -1,400 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/posix_acl.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "xattr.h"
|
||||
#include "acl.h"
|
||||
#include "inode.h"
|
||||
#include "trans.h"
|
||||
|
||||
/*
|
||||
* POSIX draft ACLs are stored as full xattr items with the entries
|
||||
* encoded as the kernel's posix_acl_xattr_{header,entry} value structs.
|
||||
*
|
||||
* They're accessed and modified via user facing synthetic xattrs, iops
|
||||
* calls from the kernel, during inode mode changes, and during inode
|
||||
* creation.
|
||||
*
|
||||
* ACL access devolves into xattr access which is relatively expensive
|
||||
* so we maintain the cached native form in the vfs inode. We drop the
|
||||
* cache in lock invalidation which means that cached acl access must
|
||||
* always be performed under cluster locking.
|
||||
*/
|
||||
|
||||
static int acl_xattr_name_len(int type, char **name, size_t *name_len)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
*name = XATTR_NAME_POSIX_ACL_ACCESS;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
*name = XATTR_NAME_POSIX_ACL_DEFAULT;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
char *value = NULL;
|
||||
char *name;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
|
||||
acl = get_cached_acl(inode, type);
|
||||
if (acl != ACL_NOT_CACHED)
|
||||
return acl;
|
||||
#endif
|
||||
|
||||
ret = acl_xattr_name_len(type, &name, NULL);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = scoutfs_xattr_get_locked(inode, name, NULL, 0, lock);
|
||||
if (ret > 0) {
|
||||
value = kzalloc(ret, GFP_NOFS);
|
||||
if (!value)
|
||||
ret = -ENOMEM;
|
||||
else
|
||||
ret = scoutfs_xattr_get_locked(inode, name, value, ret, lock);
|
||||
}
|
||||
if (ret > 0) {
|
||||
acl = posix_acl_from_xattr(&init_user_ns, value, ret);
|
||||
} else if (ret == -ENODATA || ret == 0) {
|
||||
acl = NULL;
|
||||
} else {
|
||||
acl = ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/* can set null negative cache */
|
||||
if (!IS_ERR(acl))
|
||||
set_cached_acl(inode, type, acl);
|
||||
|
||||
kfree(value);
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
#ifdef KC_GET_ACL_DENTRY
|
||||
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, int type)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
#else
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
#endif
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
struct posix_acl *acl;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
#endif
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
|
||||
if (ret < 0) {
|
||||
acl = ERR_PTR(ret);
|
||||
} else {
|
||||
acl = scoutfs_get_acl_locked(inode, type, lock);
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
|
||||
}
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
/*
 * The caller has acquired the locks and dirtied the inode; they'll
 * update the inode item if we return 0.
 */
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
			   struct scoutfs_lock *lock, struct list_head *ind_locks)
{
	static const struct scoutfs_xattr_prefix_tags tgs = {0,}; /* never scoutfs. prefix */
	bool set_mode = false;
	char *value = NULL;
	umode_t new_mode;
	size_t name_len;
	char *name;
	int size = 0;
	int ret;

	ret = acl_xattr_name_len(type, &name, &name_len);
	if (ret < 0)
		return ret;

	switch (type) {
	case ACL_TYPE_ACCESS:
		if (acl) {
			ret = posix_acl_update_mode(KC_VFS_INIT_NS
						    inode, &new_mode, &acl);
			if (ret < 0)
				goto out;
			set_mode = true;
		}
		break;
	case ACL_TYPE_DEFAULT:
		if (!S_ISDIR(inode->i_mode)) {
			ret = acl ? -EINVAL : 0;
			goto out;
		}
		break;
	}

	if (acl) {
		size = posix_acl_xattr_size(acl->a_count);
		value = kmalloc(size, GFP_NOFS);
		if (!value) {
			ret = -ENOMEM;
			goto out;
		}

		ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
		if (ret < 0)
			goto out;
	}

	ret = scoutfs_xattr_set_locked(inode, name, name_len, value, size, 0, &tgs,
				       lock, NULL, ind_locks);
	if (ret == 0 && set_mode) {
		inode->i_mode = new_mode;
		if (!value) {
			/* can be setting an acl that only affects mode, didn't need xattr */
			inode_inc_iversion(inode);
			inode->i_ctime = current_time(inode);
		}
	}

out:
	if (!ret)
		set_cached_acl(inode, type, acl);

	kfree(value);

	return ret;
}

#ifdef KC_GET_ACL_DENTRY
int scoutfs_set_acl(KC_VFS_NS_DEF
		    struct dentry *dentry, struct posix_acl *acl, int type)
{
	struct inode *inode = dentry->d_inode;
#else
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
#endif
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
	int ret;

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock) ?:
	      scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
	if (ret == 0) {
		ret = scoutfs_dirty_inode_item(inode, lock) ?:
		      scoutfs_set_acl_locked(inode, acl, type, lock, &ind_locks);
		if (ret == 0)
			scoutfs_update_inode_item(inode, lock, &ind_locks);

		scoutfs_release_trans(sb);
		scoutfs_inode_index_unlock(sb, &ind_locks);
	}

	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
	return ret;
}
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_get_xattr(const struct xattr_handler *handler, struct dentry *dentry,
			  struct inode *inode, const char *name, void *value,
			  size_t size)
{
	int type = handler->flags;
#else
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
			  int type)
{
#endif
	struct posix_acl *acl;
	int ret = 0;

	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

#ifdef KC_GET_ACL_DENTRY
	acl = scoutfs_get_acl(KC_VFS_INIT_NS
			      dentry, type);
#else
	acl = scoutfs_get_acl(dentry->d_inode, type);
#endif
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;

	ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
	posix_acl_release(acl);

	return ret;
}

#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_set_xattr(const struct xattr_handler *handler,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags)
{
	int type = handler->flags;
#else
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
			  int flags, int type)
{
#endif
	struct posix_acl *acl = NULL;
	int ret;

	if (!inode_owner_or_capable(KC_VFS_INIT_NS dentry->d_inode))
		return -EPERM;

	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

	if (value) {
		acl = posix_acl_from_xattr(&init_user_ns, value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);

		if (acl) {
			ret = kc_posix_acl_valid(&init_user_ns, acl);
			if (ret)
				goto out;
		}
	}

#ifdef KC_GET_ACL_DENTRY
	ret = scoutfs_set_acl(KC_VFS_INIT_NS dentry, acl, type);
#else
	ret = scoutfs_set_acl(dentry->d_inode, acl, type);
#endif
out:
	posix_acl_release(acl);

	return ret;
}

/*
 * Apply the parent's default acl to a new inode's access acl and inherit
 * it as the default for new directories.  The caller holds locks and a
 * transaction.
 */
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
			    struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
			    struct list_head *ind_locks)
{
	struct posix_acl *acl = NULL;
	int ret = 0;

	if (!S_ISLNK(inode->i_mode)) {
		if (IS_POSIXACL(dir)) {
			acl = scoutfs_get_acl_locked(dir, ACL_TYPE_DEFAULT, dir_lock);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		}

		if (!acl)
			inode->i_mode &= ~current_umask();
	}

	if (IS_POSIXACL(dir) && acl) {
		if (S_ISDIR(inode->i_mode)) {
			ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_DEFAULT,
						     lock, ind_locks);
			if (ret)
				goto out;
		}
		ret = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
		if (ret < 0)
			return ret;
		if (ret > 0)
			ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS,
						     lock, ind_locks);
	} else {
		cache_no_acl(inode);
	}
out:
	posix_acl_release(acl);
	return ret;
}

/*
 * Update the access ACL based on a newly set mode.  If we return an
 * error then the xattr wasn't changed.
 *
 * Annoyingly, setattr_copy has logic that transforms the final set mode
 * that we want to use to update the acl.  But we don't want to modify
 * the other inode fields while discovering the resulting mode.  We're
 * relying on acl_chmod not caring about the transformation (currently
 * just clears sgid).  It would be better if we could get the resulting
 * mode to give to acl_chmod without modifying the other inode fields.
 *
 * The caller has the inode mutex, a cluster lock, transaction, and will
 * update the inode item if we return success.
 */
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
			     struct scoutfs_lock *lock, struct list_head *ind_locks)
{
	struct posix_acl *acl;
	int ret = 0;

	if (!IS_POSIXACL(inode) || !(attr->ia_valid & ATTR_MODE))
		return 0;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	acl = scoutfs_get_acl_locked(inode, ACL_TYPE_ACCESS, lock);
	if (IS_ERR_OR_NULL(acl))
		return PTR_ERR(acl);

	ret = __posix_acl_chmod(&acl, GFP_KERNEL, attr->ia_mode);
	if (ret)
		return ret;

	ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS, lock, ind_locks);
	posix_acl_release(acl);
	return ret;
}
@@ -1,34 +0,0 @@
#ifndef _SCOUTFS_ACL_H_
#define _SCOUTFS_ACL_H_

#ifdef KC_GET_ACL_DENTRY
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF struct dentry *dentry, int type);
int scoutfs_set_acl(KC_VFS_NS_DEF struct dentry *dentry, struct posix_acl *acl, int type);
#else
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type);
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
#endif
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
			   struct scoutfs_lock *lock, struct list_head *ind_locks);
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_get_xattr(const struct xattr_handler *, struct dentry *dentry,
			  struct inode *inode, const char *name, void *value,
			  size_t size);
int scoutfs_acl_set_xattr(const struct xattr_handler *,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags);
#else
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
			  int type);
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
			  int flags, int type);
#endif
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
			     struct scoutfs_lock *lock, struct list_head *ind_locks);
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
			    struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
			    struct list_head *ind_locks);
#endif
659
kmod/src/alloc.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/random.h>

@@ -30,8 +29,8 @@
 * The core allocator uses extent items in btrees rooted in the super.
 * Each free extent is stored in two items.  The first item is indexed
 * by block location and is used to merge adjacent extents when freeing.
 * The second item is indexed by the order of the length and is used to
 * find large extents to allocate from.
 * The second item is indexed by length and is used to find large
 * extents to allocate from.
 *
 * Free extent allocation always consumes the front of the largest
 * extent.  This attempts to discourage fragmentation by giving smaller
 * freed extents
@@ -67,97 +66,26 @@
 * blocks to modify the next blocks, and swaps them at each transaction.
 */

/*
 * Return the order of the length of a free extent, which we define as
 * floor(log_8(len)): 0..7 = 0, 8..63 = 1, etc.
 */
static u64 free_extent_order(u64 len)
{
	return (fls64(len | 1) - 1) / 3;
}
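
/*
 * For illustration (a worked example, not part of the original change):
 * fls64(len | 1) - 1 is floor(log2(len)), and dividing by 3 gives the
 * floor(log_8(len)) defined above, so len 1..7 maps to order 0, len
 * 8..63 to order 1, and len 64 to order 2.  The "| 1" keeps fls64()
 * from seeing the never-stored len of 0.
 */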

/*
 * The smallest (non-zero) length that will be mapped to the same order
 * as the given length.
 */
static u64 smallest_order_length(u64 len)
{
	return 1ULL << (free_extent_order(len) * 3);
}
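
/*
 * Worked example (illustrative, not part of the original change):
 * free_extent_order(100) is 2, so smallest_order_length(100) is
 * 1 << (2 * 3) == 64, the shortest length that still falls in order 2.
 */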

/*
 * Moving an extent between trees can dirty blocks in several ways.  This
 * function calculates the worst-case number of blocks across these scenarios.
 * We treat the alloc and free counts independently, so the values below are
 * max(allocated, freed), not the sum.
 *
 * We track extents with two separate btree items: by block number and by size.
 *
 * If we're removing an extent from the btree (allocating), we can dirty
 * two blocks if the keys are in different leaves.  If we wind up merging
 * leaves because we fall below the low water mark, we can wind up freeing
 * three leaves.
 *
 * That sequence is as follows, assuming the original keys are removed from
 * blocks A and B:
 *
 *   Allocate new dirty A' and B'
 *   Free old stable A and B
 *   B' has fallen below the low water mark, so copy B' into A'
 *   Free B'
 *
 * An extent insertion (freeing an extent) can dirty up to five distinct items
 * in the btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent:
 *
 * In the by-blkno portion of the btree, we can dirty (allocate for COW) up
 * to two blocks - either by merging adjacent extents, which can cause us to
 * join leaf blocks; or by an insertion that causes a split.
 *
 * In the by-size portion, we never merge extents, so normally we just dirty
 * a single item with a size insertion.  But if we merged adjacent extents in
 * the by-blkno portion of the tree, we might be working with three by-size
 * items: removing the two old ones that were combined in the merge; and
 * adding the new one for the larger, merged size.
 *
 * Finally, dirtying the paths to these leaves can grow the tree and grow/shrink
 * neighbours at each level, so we multiply by the height of the tree after
 * accounting for a possible new level.
 */
static u32 extent_mod_blocks(u32 height)
{
	return ((1 + height) * 3) * 5;
}
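
/*
 * Worked example (illustrative, an interpretation of the comment above
 * rather than part of the original change): with a tree of height 2
 * this allows ((1 + 2) * 3) * 5 == 45 dirtied blocks -- up to five
 * items, each down a path of height + 1 levels after growth, with up
 * to three blocks touched per level.
 */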

/*
 * Free extents don't have flags and are stored in two indexes sorted by
 * block location and by length order, largest first.  The location key
 * field is set to the final block in the extent so that we can find
 * intersections by calling _next() with the start of the range we're
 * searching for.
 *
 * We never store 0 length extents but we do build keys for searching
 * the order index from 0,0 without having to map it to a real extent.
 * block location and by length, largest first.  The block location key
 * is set to the final block in the extent so that we can find
 * intersections by calling _next() iterators starting with the block
 * we're searching for.
 */
static void init_ext_key(struct scoutfs_key *key, int zone, u64 start, u64 len)
static void init_ext_key(struct scoutfs_key *key, int type, u64 start, u64 len)
{
	*key = (struct scoutfs_key) {
		.sk_zone = zone,
		.sk_zone = SCOUTFS_FREE_EXTENT_ZONE,
		.sk_type = type,
	};

	if (len == 0) {
		/* we only use 0 len extents for magic 0,0 order lookups */
		WARN_ON_ONCE(zone != SCOUTFS_FREE_EXTENT_ORDER_ZONE || start != 0);
		return;
	}

	if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
	if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
		key->skfb_end = cpu_to_le64(start + len - 1);
		key->skfb_len = cpu_to_le64(len);
	} else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE) {
		key->skfo_revord = cpu_to_le64(U64_MAX - free_extent_order(len));
		key->skfo_end = cpu_to_le64(start + len - 1);
		key->skfo_len = cpu_to_le64(len);
	} else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE) {
		key->skfl_neglen = cpu_to_le64(-len);
		key->skfl_blkno = cpu_to_le64(start);
	} else {
		BUG();
	}
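
/*
 * Worked example of the end-keyed encoding above (illustrative, not
 * part of the original change): an extent with start 100 and len 8 is
 * stored under end == 107.  A search for the extent containing block
 * 103 calls _next() from a key with end == 103 and lands on this item
 * because 103 <= 107.
 */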
@@ -165,27 +93,23 @@ static void init_ext_key(struct scoutfs_key *key, int zone, u64 start, u64 len)

static void ext_from_key(struct scoutfs_extent *ext, struct scoutfs_key *key)
{
	if (key->sk_zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
	if (key->sk_type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
		ext->start = le64_to_cpu(key->skfb_end) -
			     le64_to_cpu(key->skfb_len) + 1;
		ext->len = le64_to_cpu(key->skfb_len);
	} else {
		ext->start = le64_to_cpu(key->skfo_end) -
			     le64_to_cpu(key->skfo_len) + 1;
		ext->len = le64_to_cpu(key->skfo_len);
		ext->start = le64_to_cpu(key->skfl_blkno);
		ext->len = -le64_to_cpu(key->skfl_neglen);
	}
	ext->map = 0;
	ext->flags = 0;

	/* we never store 0 length extents */
	WARN_ON_ONCE(ext->len == 0);
}

struct alloc_ext_args {
	struct scoutfs_alloc *alloc;
	struct scoutfs_block_writer *wri;
	struct scoutfs_alloc_root *root;
	int zone;
	int type;
};

static int alloc_ext_next(struct super_block *sb, void *arg,
@@ -196,13 +120,13 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
	struct scoutfs_key key;
	int ret;

	init_ext_key(&key, args->zone, start, len);
	init_ext_key(&key, args->type, start, len);

	ret = scoutfs_btree_next(sb, &args->root->root, &key, &iref);
	if (ret == 0) {
		if (iref.val_len != 0)
			ret = -EIO;
		else if (iref.key->sk_zone != args->zone)
		else if (iref.key->sk_type != args->type)
			ret = -ENOENT;
		else
			ext_from_key(ext, iref.key);
@@ -215,19 +139,19 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
	return ret;
}

static int other_zone(int zone)
static int other_type(int type)
{
	if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
		return SCOUTFS_FREE_EXTENT_ORDER_ZONE;
	else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE)
		return SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
	if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE)
		return SCOUTFS_FREE_EXTENT_LEN_TYPE;
	else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE)
		return SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
	else
		BUG();
}

/*
 * Insert an extent along with its matching item which is indexed by
 * opposite of its order or blkno.  If we succeed we update the root's
 * opposite of its len or blkno.  If we succeed we update the root's
 * record of the total length of all the stored extents.
 */
static int alloc_ext_insert(struct super_block *sb, void *arg,
@@ -243,8 +167,8 @@ static int alloc_ext_insert(struct super_block *sb, void *arg,
	if (WARN_ON_ONCE(map || flags))
		return -EINVAL;

	init_ext_key(&key, args->zone, start, len);
	init_ext_key(&other, other_zone(args->zone), start, len);
	init_ext_key(&key, args->type, start, len);
	init_ext_key(&other, other_type(args->type), start, len);

	ret = scoutfs_btree_insert(sb, args->alloc, args->wri,
				   &args->root->root, &key, NULL, 0);
@@ -272,8 +196,8 @@ static int alloc_ext_remove(struct super_block *sb, void *arg,
	int ret;
	int err;

	init_ext_key(&key, args->zone, start, len);
	init_ext_key(&other, other_zone(args->zone), start, len);
	init_ext_key(&key, args->type, start, len);
	init_ext_key(&other, other_type(args->type), start, len);

	ret = scoutfs_btree_delete(sb, args->alloc, args->wri,
				   &args->root->root, &key);
@@ -297,7 +221,6 @@ static struct scoutfs_ext_ops alloc_ext_ops = {
	.next = alloc_ext_next,
	.insert = alloc_ext_insert,
	.remove = alloc_ext_remove,
	.insert_overlap_warn = true,
};

static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)
@@ -307,17 +230,20 @@ static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)

static bool invalid_meta_blkno(struct super_block *sb, u64 blkno)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	u64 last_meta = (i_size_read(sbi->meta_bdev->bd_inode) >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;

	return invalid_extent(blkno, blkno, SCOUTFS_META_DEV_START_BLKNO, last_meta);
	return invalid_extent(blkno, blkno,
			      le64_to_cpu(super->first_meta_blkno),
			      le64_to_cpu(super->last_meta_blkno));
}

static bool invalid_data_extent(struct super_block *sb, u64 start, u64 len)
{
	u64 last_data = (i_size_read(sb->s_bdev->bd_inode) >> SCOUTFS_BLOCK_SM_SHIFT) - 1;
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;

	return invalid_extent(start, start + len - 1, SCOUTFS_DATA_DEV_START_BLKNO, last_data);
	return invalid_extent(start, start + len - 1,
			      le64_to_cpu(super->first_data_blkno),
			      le64_to_cpu(super->last_data_blkno));
}

void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
@@ -693,7 +619,7 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = &dalloc->root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
	};
	int ret = 0;

@@ -719,14 +645,6 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
 *
 * Unlike meta allocations, the caller is expected to serialize
 * allocations from the root.
 *
 * ENOBUFS is returned if the data allocator ran out of space and we can
 * probably refill it from the server.  The caller is expected to back
 * out, commit the transaction, and try again.
 *
 * ENOSPC is returned if the data allocator ran out of space but we have
 * a flag from the server telling us that there's no more space
 * available.  This is a hard error and should be returned.
 */
int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
@@ -737,7 +655,7 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		.alloc = alloc,
		.wri = wri,
		.root = &dalloc->root,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
		.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
	};
	struct scoutfs_extent ext;
	u64 len;
@@ -775,13 +693,13 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
	ret = 0;
out:
	if (ret < 0) {
		if (ret == -ENOENT) {
			if (le32_to_cpu(dalloc->root.flags) & SCOUTFS_ALLOC_FLAG_LOW)
				ret = -ENOSPC;
			else
				ret = -ENOBUFS;
		}

		/*
		 * Special retval meaning there wasn't space to alloc from
		 * this txn.  Doesn't mean the filesystem is completely full.
		 * Maybe upper layers want to try again.
		 */
		if (ret == -ENOENT)
			ret = -ENOBUFS;
		*blkno_ret = 0;
		*count_ret = 0;
	} else {
@@ -810,7 +728,7 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
	};
	int ret;

@@ -823,95 +741,6 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
	return ret;
}

/*
 * Return the first zone bit that the extent intersects with.
 */
static int first_extent_zone(struct scoutfs_extent *ext, __le64 *zones, u64 zone_blocks)
{
	int first;
	int last;
	int nr;

	first = div64_u64(ext->start, zone_blocks);
	last = div64_u64(ext->start + ext->len - 1, zone_blocks);

	nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, first);
	if (nr <= last)
		return nr;

	return SCOUTFS_DATA_ALLOC_MAX_ZONES;
}
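
/*
 * Worked example (illustrative, not part of the original change): with
 * zone_blocks == 1024, an extent with start 5000 and len 200 covers
 * blocks 5000..5199 and so intersects zones 4 and 5; the first of
 * those bits that is set in the caller's bitmap is returned.
 */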

/*
 * Find an extent in specific zones to satisfy an allocation.  We use
 * the order items to search for the largest extent that intersects with
 * the zones whose bits are set in the caller's bitmap.
 */
static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *root,
			    __le64 *zones, u64 zone_blocks,
			    struct scoutfs_extent *found_ret, u64 count,
			    struct scoutfs_extent *ext_ret)
{
	struct alloc_ext_args args = {
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext = {0,};
	u64 start;
	u64 len;
	int nr;
	int ret;

	/* don't bother when there are no bits set */
	if (find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0) ==
	    SCOUTFS_DATA_ALLOC_MAX_ZONES)
		return -ENOENT;

	/* start searching for largest extent from the first zone */
	len = smallest_order_length(SCOUTFS_BLOCK_SM_MAX);
	nr = 0;

	for (;;) {
		/* search for extents in the next zone at our order */
		nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, nr);
		if (nr >= SCOUTFS_DATA_ALLOC_MAX_ZONES) {
			/* wrap down to next smaller order if we run out of bits */
			len >>= 3;
			if (len == 0) {
				ret = -ENOENT;
				break;
			}
			nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0);
		}

		start = (u64)nr * zone_blocks;

		ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, start, len, &found);
		if (ret < 0)
			break;

		/* see if the next extent intersects any zones */
		nr = first_extent_zone(&found, zones, zone_blocks);
		if (nr < SCOUTFS_DATA_ALLOC_MAX_ZONES) {
			start = (u64)nr * zone_blocks;

			ext.start = max(start, found.start);
			ext.len = min(count, found.start + found.len - ext.start);

			*found_ret = found;
			*ext_ret = ext;
			ret = 0;
			break;
		}

		/* continue searching past extent */
		nr = div64_u64(found.start + found.len - 1, zone_blocks) + 1;
		len = smallest_order_length(found.len);
	}

	return ret;
}

/*
 * Move extent items adding up to the requested total length from the
@@ -922,18 +751,6 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
 * -ENOENT is returned if we run out of extents in the source tree
 * before moving the total.
 *
 * If meta_budget is non-zero then -EINPROGRESS can be returned if the
 * caller's budget is consumed in the allocator during this call
 * (though not necessarily by us, we don't have per-thread tracking of
 * allocator consumption :/).  The call can still have made progress and
 * the caller is expected to commit the dirty trees and examine the
 * resulting modified trees to see if they need to continue moving
 * extents.
 *
 * The caller can specify that extents in the source tree should first
 * be found based on their zone bitmaps.  We'll first try to find
 * extents in the exclusive zones, then vacant zones, and then we'll
 * fall back to normal allocation that ignores zones.
 *
 * This first pass is not optimal because it performs full btree walks
 * per extent.  We could optimize this with more clever btree item
 * manipulation functions which can iterate through src and dst blocks
@@ -942,100 +759,32 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget)
		       struct scoutfs_alloc_root *src, u64 total)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext;
	u32 avail_start = 0;
	u32 freed_start = 0;
	u64 moved = 0;
	u64 count;
	int ret = 0;
	int err;

	if (zone_blocks == 0) {
		exclusive = NULL;
		vacant = NULL;
	}

	if (meta_budget != 0)
		scoutfs_alloc_meta_remaining(alloc, &avail_start, &freed_start);

	while (moved < total) {
		count = total - moved;

		if (exclusive) {
			/* first try to find extents in our exclusive zones */
			ret = find_zone_extent(sb, src, exclusive, zone_blocks,
					       &found, count, &ext);
			if (ret == -ENOENT) {
				exclusive = NULL;
				continue;
			}
		} else if (vacant) {
			/* then try to find extents in vacant zones */
			ret = find_zone_extent(sb, src, vacant, zone_blocks,
					       &found, count, &ext);
			if (ret == -ENOENT) {
				vacant = NULL;
				continue;
			}
		} else {
			/* otherwise fall back to finding extents anywhere */
			args.root = src;
			args.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE;
			ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, 0, 0, &found);
			if (ret == 0) {
				ext.start = found.start;
				ext.len = min(count, found.len);
			}
		}
		if (ret < 0)
			break;

		if (meta_budget != 0 &&
		    scoutfs_alloc_meta_low_since(alloc, avail_start, freed_start, meta_budget,
						 extent_mod_blocks(src->root.height) +
						 extent_mod_blocks(dst->root.height))) {
			ret = -EINPROGRESS;
			break;
		}

		/* return partial if the server alloc can't dirty any more */
		if (scoutfs_alloc_meta_low(sb, alloc, 50 + extent_mod_blocks(src->root.height) +
					   extent_mod_blocks(dst->root.height))) {
			if (WARN_ON_ONCE(!moved))
				ret = -ENOSPC;
			else
				ret = 0;
			break;
		}

		/* searching set start/len, finish initializing alloced extent */
		ext.map = found.map ? ext.start - found.start + found.map : 0;
		ext.flags = found.flags;

		/* remove the allocation from the found extent */
		args.root = src;
		args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
		ret = scoutfs_ext_remove(sb, &alloc_ext_ops, &args, ext.start, ext.len);
		args.type = SCOUTFS_FREE_EXTENT_LEN_TYPE;
		ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args,
					0, 0, total - moved, &ext);
		if (ret < 0)
			break;

		/* insert the allocated extent into the dest */
		args.root = dst;
		args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
		args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
		ret = scoutfs_ext_insert(sb, &alloc_ext_ops, &args, ext.start,
					 ext.len, ext.map, ext.flags);
		if (ret < 0) {
			/* and put it back in src if insertion failed */
			args.root = src;
			args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
			args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
			err = scoutfs_ext_insert(sb, &alloc_ext_ops, &args,
						 ext.start, ext.len, ext.map,
						 ext.flags);
@@ -1045,8 +794,6 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,

		moved += ext.len;
		scoutfs_inc_counter(sb, alloc_moved_extent);

		trace_scoutfs_alloc_move_extent(sb, &ext);
	}

	scoutfs_inc_counter(sb, alloc_move);
@@ -1055,39 +802,6 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
	return ret;
}

/*
 * Add new free space to an allocator.  _ext_insert will make sure that it
 * doesn't overlap with any existing extents.  This is done by the server in
 * a transaction that also updates total_*_blocks in the super, so we don't
 * verify here.
 */
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};

	return scoutfs_ext_insert(sb, &alloc_ext_ops, &args, start, len, 0, 0);
}

int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
	};

	return scoutfs_ext_remove(sb, &alloc_ext_ops, &args, start, len);
}

/*
 * We only trim one block, instead of looping trimming all, because the
 * caller is assuming that we do a fixed amount of work when they check
@@ -1134,22 +848,18 @@ out:
}

/*
 * True if the allocator has enough blocks in the avail list and space
 * in the freed list to be able to perform the caller's operations.  If
 * false the caller should back off and return partial progress rather
 * than completely exhausting the avail list or overflowing the freed
 * list.
 * True if the allocator has enough free blocks to cow (alloc and free)
 * a list block and all the btree blocks that store extent items.
 *
 * The caller tells us how many extents they're about to modify and how
 * many other additional blocks they may cow manually.  And finally, the
 * caller could be the first to dirty the avail and freed blocks in the
 * allocator.
 * At most, an extent operation can dirty down three paths of the tree
 * to modify a blkno item and two distant len items.  We can grow and
 * split the root, and then those three paths could share blocks but each
 * modify two leaf blocks.
 */
static bool list_has_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
			    struct scoutfs_alloc_root *root, u32 extents, u32 addl_blocks)
static bool list_can_cow(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_alloc_root *root)
{
	u32 tree_blocks = extent_mod_blocks(root->root.height) * extents;
	u32 most = 1 + tree_blocks + addl_blocks;
	u32 most = 1 + (1 + 1 + (3 * (1 + root->root.height + 1)));

	if (le32_to_cpu(alloc->avail.first_nr) < most) {
		scoutfs_inc_counter(sb, alloc_list_avail_lo);
@@ -1191,7 +901,7 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
		.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
	};
	struct scoutfs_alloc_list_block *lblk;
	struct scoutfs_block *bl = NULL;
@@ -1213,7 +923,8 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
		goto out;
	lblk = bl->data;

	while (le32_to_cpu(lblk->nr) < target && list_has_blocks(sb, alloc, root, 1, 0)) {
	while (le32_to_cpu(lblk->nr) < target &&
	       list_can_cow(sb, alloc, root)) {

		ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args, 0, 0,
					target - le32_to_cpu(lblk->nr), &ext);
@@ -1225,8 +936,6 @@ int scoutfs_alloc_fill_list(struct super_block *sb,

		for (i = 0; i < ext.len; i++)
			list_block_add(lhead, lblk, ext.start + i);

		trace_scoutfs_alloc_fill_extent(sb, &ext);
	}

out:
@@ -1249,7 +958,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
		.alloc = alloc,
		.wri = wri,
		.root = root,
		.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
		.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
	};
	struct scoutfs_alloc_list_block *lblk = NULL;
	struct scoutfs_block *bl = NULL;
@@ -1259,7 +968,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
	if (WARN_ON_ONCE(lhead_in_alloc(alloc, lhead)))
		return -EINVAL;

	while (lhead->ref.blkno && list_has_blocks(sb, alloc, args.root, 1, 1)) {
	while (lhead->ref.blkno && list_can_cow(sb, alloc, args.root)) {

		if (lhead->first_nr == 0) {
			ret = trim_empty_first_block(sb, alloc, wri, lhead);
@@ -1295,8 +1004,6 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
			break;

		list_block_remove(lhead, lblk, ext.len);

		trace_scoutfs_alloc_empty_extent(sb, &ext);
	}

	scoutfs_block_put(sb, bl);
@@ -1384,82 +1091,37 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
	return lo;
}

void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&alloc->seqlock);
		*avail_total = le32_to_cpu(alloc->avail.first_nr);
		*freed_space = list_block_space(alloc->freed.first_nr);
	} while (read_seqretry(&alloc->seqlock, seq));
}

/*
 * Returns true if the caller's consumption of nr from either avail or
 * freed would end up exceeding their budget relative to the starting
 * remaining snapshot they took.
 * Call the caller's callback for every persistent allocator structure
 * we can find.
 */
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
				  u32 budget, u32 nr)
{
	u32 avail_use;
	u32 freed_use;
	u32 avail;
	u32 freed;

	scoutfs_alloc_meta_remaining(alloc, &avail, &freed);

	avail_use = avail_start - avail;
	freed_use = freed_start - freed;

	return ((avail_use + nr) > budget) || ((freed_use + nr) > budget);
}
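
/*
 * Worked example (illustrative, not part of the original change): if
 * the snapshot saw avail_start == 100 and avail has since dropped to
 * 90 then avail_use == 10; with budget == 12, asking about nr == 5
 * more returns true because 10 + 5 exceeds the budget, and the caller
 * backs off with partial progress.
 */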

bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag)
{
	unsigned int seq;
	bool set;

	do {
		seq = read_seqbegin(&alloc->seqlock);
		set = !!(le32_to_cpu(alloc->avail.flags) & flag);
	} while (read_seqretry(&alloc->seqlock, seq));

	return set;
}

/*
 * Iterate over the allocator structures referenced by the caller's
 * super and call the caller's callback with summaries of the blocks
 * found in each structure.
 *
 * The caller is responsible for the stability of the referenced blocks.
 * If the blocks could be stale the caller must deal with retrying when
 * it sees ESTALE.
 */
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
				scoutfs_alloc_foreach_cb_t cb, void *arg)
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg)
{
	struct scoutfs_block_ref stale_refs[2] = {{0,}};
	struct scoutfs_block_ref refs[2] = {{0,}};
	struct scoutfs_super_block *super = NULL;
	struct scoutfs_srch_compact *sc;
	struct scoutfs_log_merge_request *lmreq;
	struct scoutfs_log_merge_complete *lmcomp;
	struct scoutfs_log_trees lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key key;
	int expected;
	u64 avail_tot;
	u64 freed_tot;
	u64 id;
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
	if (!sc) {
	if (!super || !sc) {
		ret = -ENOMEM;
		goto out;
	}

retry:
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
		goto out;

	refs[0] = super->logs_root.ref;
	refs[1] = super->srch_root.ref;

	/* all the server allocators */
	ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_SERVER, 0, true, true,
		 le64_to_cpu(super->meta_alloc[0].total_len)) ?:
@@ -1549,152 +1211,19 @@ int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_blo
		scoutfs_key_inc(&key);
	}

	/* log merge allocators */
	memset(&key, 0, sizeof(key));
	key.sk_zone = SCOUTFS_LOG_MERGE_REQUEST_ZONE;
	expected = sizeof(*lmreq);
	id = 0;
	avail_tot = 0;
	freed_tot = 0;

	for (;;) {
		ret = scoutfs_btree_next(sb, &super->log_merge, &key, &iref);
		if (ret == 0) {
			if (iref.key->sk_zone != key.sk_zone) {
				ret = -ENOENT;
			} else if (iref.val_len == expected) {
				key = *iref.key;
				if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
					lmreq = iref.val;
					id = le64_to_cpu(lmreq->rid);
					avail_tot = le64_to_cpu(lmreq->meta_avail.total_nr);
					freed_tot = le64_to_cpu(lmreq->meta_freed.total_nr);
				} else {
					lmcomp = iref.val;
					id = le64_to_cpu(lmcomp->rid);
					avail_tot = le64_to_cpu(lmcomp->meta_avail.total_nr);
					freed_tot = le64_to_cpu(lmcomp->meta_freed.total_nr);
				}
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret == -ENOENT) {
			if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
				memset(&key, 0, sizeof(key));
				key.sk_zone = SCOUTFS_LOG_MERGE_COMPLETE_ZONE;
				expected = sizeof(*lmcomp);
				continue;
			}
			break;
		}
		if (ret < 0)
			goto out;

		ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, true, avail_tot) ?:
		      cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, false, freed_tot);
		if (ret < 0)
			goto out;

		scoutfs_key_inc(&key);
	}

	ret = 0;
out:
	if (ret == -ESTALE) {
		if (memcmp(&stale_refs, &refs, sizeof(refs)) == 0) {
			ret = -EIO;
		} else {
			BUILD_BUG_ON(sizeof(stale_refs) != sizeof(refs));
			memcpy(stale_refs, refs, sizeof(stale_refs));
			goto retry;
		}
	}

	kfree(super);
	kfree(sc);
	return ret;
}

/*
 * Read the current on-disk super and use it to walk the allocators and
 * call the caller's callback.  This assumes that the super it's reading
 * could be stale and will retry if it encounters stale blocks.
 */
int scoutfs_alloc_foreach(struct super_block *sb, scoutfs_alloc_foreach_cb_t cb, void *arg)
{
	struct scoutfs_super_block *super = NULL;
	DECLARE_SAVED_REFS(saved);
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	if (!super) {
		ret = -ENOMEM;
		goto out;
	}

	do {
		ret = scoutfs_read_super(sb, super);
		if (ret < 0)
			goto out;

		ret = scoutfs_alloc_foreach_super(sb, super, cb, arg);

		ret = scoutfs_block_check_stale(sb, ret, &saved, &super->logs_root.ref,
						&super->srch_root.ref);
	} while (ret == -ESTALE);

out:
	kfree(super);
	return ret;
}

struct foreach_cb_args {
	scoutfs_alloc_extent_cb_t cb;
	void *cb_arg;
};

static int alloc_btree_extent_item_cb(struct super_block *sb, struct scoutfs_key *key, u64 seq,
				      u8 flags, void *val, int val_len, void *arg)
{
	struct foreach_cb_args *cba = arg;
	struct scoutfs_extent ext;

	if (key->sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
		return -ENOENT;

	ext_from_key(&ext, key);
	cba->cb(sb, cba->cb_arg, &ext);

	return 0;
}

/*
 * Call the caller's callback on each extent stored in the allocator's
 * btree.  The callback sees the extents in order by starting blkno.
 */
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
			     scoutfs_alloc_extent_cb_t cb, void *cb_arg)
{
	struct foreach_cb_args cba = {
		.cb = cb,
		.cb_arg = cb_arg,
	};
	struct scoutfs_key start;
	struct scoutfs_key end;
	struct scoutfs_key key;
	int ret;

	init_ext_key(&key, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);

	for (;;) {
		/* will stop at order items before getting stuck in final block */
		BUILD_BUG_ON(SCOUTFS_FREE_EXTENT_BLKNO_ZONE > SCOUTFS_FREE_EXTENT_ORDER_ZONE);
		init_ext_key(&start, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);
		init_ext_key(&end, SCOUTFS_FREE_EXTENT_ORDER_ZONE, 0, 1);

		ret = scoutfs_btree_read_items(sb, &root->root, &key, &start, &end,
					       alloc_btree_extent_item_cb, &cba);
		if (ret < 0 || end.sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
			if (ret == -ENOENT)
				ret = 0;
			break;
		}

		key = end;
		scoutfs_key_inc(&key);
	}

	return ret;
}

@@ -19,11 +19,14 @@
	(128ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/*
 * The default size that we'll try to preallocate.  This is trying to
 * hit the limit of large efficient device writes while minimizing
 * wasted preallocation that is never used.
 * The largest aligned region that we'll try to allocate at the end of
 * the file as it's extended.  This is also limited to the current file
 * size, so we can waste at most twice the total file size when files
 * are smaller than this.  We try to keep this around the point of
 * diminishing returns in streaming performance of common data devices
 * to limit waste.
 */
#define SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS \
#define SCOUTFS_DATA_EXTEND_PREALLOC_LIMIT \
	(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/*
@@ -35,10 +38,6 @@
#define SCOUTFS_ALLOC_DATA_LG_THRESH \
	(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/* the client will force commits if data allocators get too low */
#define SCOUTFS_ALLOC_DATA_REFILL_THRESH \
	((256ULL * 1024 * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)

/*
 * Fill client alloc roots to the target when they fall below the lo
 * threshold.
@@ -56,16 +55,15 @@
#define SCOUTFS_SERVER_DATA_FILL_LO \
	(1ULL * 1024 * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)


/*
 * Log merge meta allocations are only used for one request and will
 * never use more than the dirty limit.
 * Each of the server meta_alloc roots will try to keep a minimum amount
 * of free blocks.  The server will swap roots when its current avail
 * falls below the threshold while the freed root is still above it.  It
 * must have room for the largest allocation attempted in a
 * transaction on the server.
 */
#define SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT (64ULL * 1024 * 1024)
/* a few extra blocks for alloc blocks */
#define SCOUTFS_SERVER_MERGE_FILL_TARGET \
	((SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT >> SCOUTFS_BLOCK_LG_SHIFT) + 4)
#define SCOUTFS_SERVER_MERGE_FILL_LO SCOUTFS_SERVER_MERGE_FILL_TARGET
#define SCOUTFS_SERVER_META_ALLOC_MIN \
	(SCOUTFS_SERVER_META_FILL_TARGET * 2)

/*
 * A run-time use of a pair of persistent avail/freed roots as a
@@ -127,14 +125,7 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget);
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len);
int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len);
		       struct scoutfs_alloc_root *src, u64 total);

int scoutfs_alloc_fill_list(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
@@ -155,23 +146,11 @@ int scoutfs_alloc_splice_list(struct super_block *sb,

bool scoutfs_alloc_meta_low(struct super_block *sb,
			    struct scoutfs_alloc *alloc, u32 nr);
void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space);
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
				  u32 budget, u32 nr);
bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag);

typedef int (*scoutfs_alloc_foreach_cb_t)(struct super_block *sb, void *arg,
					  int owner, u64 id,
					  bool meta, bool avail, u64 blocks);
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg);
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
				scoutfs_alloc_foreach_cb_t cb, void *arg);

typedef void (*scoutfs_alloc_extent_cb_t)(struct super_block *sb, void *cb_arg,
					  struct scoutfs_extent *ext);
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
			     scoutfs_alloc_extent_cb_t cb, void *cb_arg);

#endif

@@ -1,252 +0,0 @@
/*
 * Copyright (C) 2024 Versity Software, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>

#include "format.h"
#include "super.h"
#include "inode.h"
#include "ioctl.h"
#include "lock.h"
#include "trans.h"
#include "attr_x.h"

static int validate_attr_x_input(struct super_block *sb, struct scoutfs_ioctl_inode_attr_x *iax)
{
	int ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX__UNKNOWN) ||
	    (iax->x_flags & SCOUTFS_IOC_IAX_F__UNKNOWN))
		return -EINVAL;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_RETENTION)))
		return ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID)))
		return ret;

	return 0;
}

/*
 * If the mask indicates interest in the given attr then set the field
 * to the caller's value and return the new size if it didn't already
 * include the attr field.
 */
#define fill_attr(size, iax, bit, field, val)					\
({										\
	__typeof__(iax) _iax = (iax);						\
	__typeof__(size) _size = (size);					\
										\
	if (_iax->x_mask & (bit)) {						\
		_iax->field = (val);						\
		_size = max(_size, offsetof(struct scoutfs_ioctl_inode_attr_x, field) + \
			    sizeof_field(struct scoutfs_ioctl_inode_attr_x, field)); \
	}									\
										\
	_size;									\
})
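
/*
 * For illustration (an example of the macro above, not part of the
 * original change): fill_attr(size, iax, SCOUTFS_IOC_IAX_META_SEQ,
 * meta_seq, 123) stores 123 in iax->meta_seq when the mask bit is set
 * and widens size to the end of the meta_seq field, so the caller
 * copies back only the bytes that were actually filled.
 */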

/*
 * Returns -errno on error, or >= 0, the number of bytes filled by the
 * response.  0 can be returned if no attributes are requested in the
 * input x_mask.
 */
int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	size_t size = 0;
	u64 offline;
	u64 online;
	u64 bits;
	int ret;

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_META_SEQ,
			 meta_seq, scoutfs_inode_meta_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_SEQ,
			 data_seq, scoutfs_inode_data_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_VERSION,
			 data_version, scoutfs_inode_data_version(inode));
	if (iax->x_mask & (SCOUTFS_IOC_IAX_ONLINE_BLOCKS | SCOUTFS_IOC_IAX_OFFLINE_BLOCKS)) {
		scoutfs_inode_get_onoff(inode, &online, &offline);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_ONLINE_BLOCKS,
				 online_blocks, online);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_OFFLINE_BLOCKS,
				 offline_blocks, offline);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_sec, inode->i_ctime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_nsec, inode->i_ctime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_sec, si->crtime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_nsec, si->crtime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_SIZE, size, i_size_read(inode));
	if (iax->x_mask & SCOUTFS_IOC_IAX__BITS) {
		bits = 0;
		if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
		    (scoutfs_inode_get_flags(inode) & SCOUTFS_INO_FLAG_RETENTION))
			bits |= SCOUTFS_IOC_IAX_B_RETENTION;
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX__BITS, bits, bits);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_PROJECT_ID,
			 project_id, scoutfs_inode_get_proj(inode));

	ret = size;
unlock:
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
	inode_unlock(inode);
out:
	return ret;
}

static bool valid_attr_changes(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	/* provided data_version must be non-zero */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version == 0))
		return false;

	/* can only set size or data version in new regular files */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) ||
	     (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)) &&
	    (!S_ISREG(inode->i_mode) || scoutfs_inode_data_version(inode) != 0))
		return false;

	/* must provide non-zero data_version with non-zero size */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) && (iax->size > 0)) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) || (iax->data_version == 0)))
		return false;

	/* must provide non-zero size when setting offline extents to that size */
	if ((iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_SIZE) || (iax->size == 0)))
		return false;

	/* the retention bit only applies to regular files */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) && !S_ISREG(inode->i_mode))
		return false;

	return true;
}

int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
	bool set_data_seq;
	int ret;

	/* initially all setting is root only, could loosen with finer grained checks */
	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	/* check for errors before making any changes */
	if (!valid_attr_changes(inode, iax)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* retention prevents modification unless also clearing retention */
	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0 && !((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
			 !(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION)))
		goto unlock;

	/* setting only so we don't see 0 data seq with nonzero data_version */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version > 0))
		set_data_seq = true;
	else
		set_data_seq = false;

	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq, true);
	if (ret)
		goto unlock;

	ret = scoutfs_dirty_inode_item(inode, lock);
	if (ret < 0)
		goto release;

	/* create the offline extent first; it might fail */
	if (iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) {
		ret = scoutfs_data_init_offline_extent(inode, iax->size, lock);
		if (ret)
			goto release;
	}

	/* make all changes once they're all checked and will succeed */
	if (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)
		scoutfs_inode_set_data_version(inode, iax->data_version);
	if (iax->x_mask & SCOUTFS_IOC_IAX_SIZE)
		i_size_write(inode, iax->size);
	if (iax->x_mask & SCOUTFS_IOC_IAX_CTIME) {
		inode->i_ctime.tv_sec = iax->ctime_sec;
		inode->i_ctime.tv_nsec = iax->ctime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_CRTIME) {
		si->crtime.tv_sec = iax->crtime_sec;
		si->crtime.tv_nsec = iax->crtime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) {
		scoutfs_inode_set_flags(inode, ~SCOUTFS_INO_FLAG_RETENTION,
					(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION) ?
					SCOUTFS_INO_FLAG_RETENTION : 0);
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID)
		scoutfs_inode_set_proj(inode, iax->project_id);

	scoutfs_update_inode_item(inode, lock, &ind_locks);
	ret = 0;
release:
	scoutfs_release_trans(sb);
unlock:
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
	inode_unlock(inode);
out:
	return ret;
}
@@ -1,11 +0,0 @@
#ifndef _SCOUTFS_ATTR_X_H_
#define _SCOUTFS_ATTR_X_H_

#include <linux/kernel.h>
#include <linux/fs.h>
#include "ioctl.h"

int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);
int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);

#endif
kmod/src/block.c (638 lines changed; diff suppressed because it is too large)
@@ -13,17 +13,6 @@ struct scoutfs_block {
	void *priv;
};

struct scoutfs_block_saved_refs {
	struct scoutfs_block_ref refs[2];
};

#define DECLARE_SAVED_REFS(name) \
	struct scoutfs_block_saved_refs name = {{{0,}}}
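(The nested braces in that initializer mirror the nesting of the type: the outer pair is the saved_refs struct, the next its two-element refs[] array, and the innermost the first scoutfs_block_ref; everything not mentioned is zeroed, so the macro declares a fully zeroed pair of saved refs.)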
int scoutfs_block_check_stale(struct super_block *sb, int ret,
			      struct scoutfs_block_saved_refs *saved,
			      struct scoutfs_block_ref *a, struct scoutfs_block_ref *b);

int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
			   struct scoutfs_block **bl_ret);
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
kmod/src/btree.c (1026 lines changed; diff suppressed because it is too large)
@@ -20,15 +20,13 @@ struct scoutfs_btree_item_ref {

/* caller gives an item to the callback */
typedef int (*scoutfs_btree_item_cb)(struct super_block *sb,
				     struct scoutfs_key *key, u64 seq, u8 flags,
				     struct scoutfs_key *key,
				     void *val, int val_len, void *arg);

/* simple singly-linked list of items */
struct scoutfs_btree_item_list {
	struct scoutfs_btree_item_list *next;
	struct scoutfs_key key;
	u64 seq;
	u8 flags;
	int val_len;
	u8 val[0];
};
@@ -84,49 +82,6 @@ int scoutfs_btree_insert_list(struct super_block *sb,
			      struct scoutfs_btree_root *root,
			      struct scoutfs_btree_item_list *lst);

int scoutfs_btree_parent_range(struct super_block *sb,
			       struct scoutfs_btree_root *root,
			       struct scoutfs_key *key,
			       struct scoutfs_key *start,
			       struct scoutfs_key *end);
int scoutfs_btree_get_parent(struct super_block *sb,
			     struct scoutfs_btree_root *root,
			     struct scoutfs_key *key,
			     struct scoutfs_btree_root *par_root);
int scoutfs_btree_set_parent(struct super_block *sb,
			     struct scoutfs_alloc *alloc,
			     struct scoutfs_block_writer *wri,
			     struct scoutfs_btree_root *root,
			     struct scoutfs_key *key,
			     struct scoutfs_btree_root *par_root);
int scoutfs_btree_rebalance(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
			    struct scoutfs_block_writer *wri,
			    struct scoutfs_btree_root *root,
			    struct scoutfs_key *key);

/* merge input is a list of roots */
struct scoutfs_btree_root_head {
	struct list_head head;
	struct scoutfs_btree_root root;
};

int scoutfs_btree_merge(struct super_block *sb,
			struct scoutfs_alloc *alloc,
			struct scoutfs_block_writer *wri,
			struct scoutfs_key *start,
			struct scoutfs_key *end,
			struct scoutfs_key *next_ret,
			struct scoutfs_btree_root *root,
			struct list_head *input_list,
			bool subtree, int dirty_limit, int alloc_low, int merge_window);

int scoutfs_btree_free_blocks(struct super_block *sb,
			      struct scoutfs_alloc *alloc,
			      struct scoutfs_block_writer *wri,
			      struct scoutfs_key *key,
			      struct scoutfs_btree_root *root, int free_budget);

void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);

#endif
@@ -20,7 +20,6 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <asm/barrier.h>
#include <linux/overflow.h>

#include "format.h"
#include "counters.h"
@@ -32,8 +31,6 @@
#include "net.h"
#include "endian_swap.h"
#include "quorum.h"
#include "omap.h"
#include "trans.h"

/*
 * The client is responsible for maintaining a connection to the server.
@@ -50,7 +47,6 @@ struct client_info {

	struct workqueue_struct *workq;
	struct delayed_work connect_dwork;
	unsigned long connect_delay_jiffies;

	u64 server_term;

@@ -69,7 +65,6 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
	struct scoutfs_net_inode_alloc ial;
	__le64 lecount = cpu_to_le64(count);
	u64 tmp;
	int ret;

	ret = scoutfs_net_sync_request(sb, client->conn,
@@ -82,7 +77,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,

		if (*nr == 0)
			ret = -ENOSPC;
		else if (check_add_overflow(*ino, *nr - 1, &tmp))
		else if (*ino + *nr < *ino)
			ret = -EINVAL;
	}
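Both sides of that last hunk guard the same failure: the allocated inode range [*ino, *ino + *nr - 1] must not wrap around u64. For example, *ino = U64_MAX with *nr = 2 makes *ino + *nr - 1 wrap to 0, which check_add_overflow() from <linux/overflow.h> reports directly and which the open-coded *ino + *nr < *ino comparison catches arithmetically; either form ends in -EINVAL.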
@@ -119,6 +114,21 @@ int scoutfs_client_get_roots(struct super_block *sb,
				       NULL, 0, roots, sizeof(*roots));
}

int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
	__le64 leseq;
	int ret;

	ret = scoutfs_net_sync_request(sb, client->conn,
				       SCOUTFS_NET_CMD_ADVANCE_SEQ,
				       NULL, 0, &leseq, sizeof(leseq));
	if (ret == 0)
		*seq = le64_to_cpu(leseq);

	return ret;
}

int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
@@ -140,7 +150,7 @@ static int client_lock_response(struct super_block *sb,
				void *resp, unsigned int resp_len,
				int error, void *data)
{
	if (resp_len != sizeof(struct scoutfs_net_lock))
	if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
		return -EINVAL;

	/* XXX error? */
@@ -205,120 +215,6 @@ int scoutfs_client_srch_commit_compact(struct super_block *sb,
					res, sizeof(*res), NULL, 0);
}

int scoutfs_client_get_log_merge(struct super_block *sb,
				 struct scoutfs_log_merge_request *req)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn,
					SCOUTFS_NET_CMD_GET_LOG_MERGE,
					NULL, 0, req, sizeof(*req));
}

int scoutfs_client_commit_log_merge(struct super_block *sb,
				    struct scoutfs_log_merge_complete *comp)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn,
					SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
					comp, sizeof(*comp), NULL, 0);
}

int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
				      struct scoutfs_open_ino_map *map)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_response(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
				    id, 0, map, sizeof(*map));
}

/* The client is receiving an omap request from the server */
static int client_open_ino_map(struct super_block *sb, struct scoutfs_net_connection *conn,
			       u8 cmd, u64 id, void *arg, u16 arg_len)
{
	if (arg_len != sizeof(struct scoutfs_open_ino_map_args))
		return -EINVAL;

	return scoutfs_omap_client_handle_request(sb, id, arg);
}

/* The client is sending an omap request to the server */
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
				struct scoutfs_open_ino_map *map)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
	struct scoutfs_open_ino_map_args args = {
		.group_nr = cpu_to_le64(group_nr),
		.req_id = 0,
	};

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
					&args, sizeof(args), map, sizeof(*map));
}

/* The client is asking the server for the current volume options */
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_GET_VOLOPT,
					NULL, 0, volopt, sizeof(*volopt));
}

/* The client is asking the server to update volume options */
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_SET_VOLOPT,
					volopt, sizeof(*volopt), NULL, 0);
}

/* The client is asking the server to clear volume options */
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_CLEAR_VOLOPT,
					volopt, sizeof(*volopt), NULL, 0);
}

int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_RESIZE_DEVICES,
					nrd, sizeof(*nrd), NULL, 0);
}

int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_STATFS,
					NULL, 0, nst, sizeof(*nst));
}

/*
 * The server is asking that we trigger a commit of the current log
 * trees so that they can ensure an item seq discontinuity between
 * finalized log btrees and the next set of open log btrees. If we're
 * shutting down then we're already going to perform a final commit.
 */
static int sync_log_trees(struct super_block *sb, struct scoutfs_net_connection *conn,
			  u8 cmd, u64 id, void *arg, u16 arg_len)
{
	if (arg_len != 0)
		return -EINVAL;

	if (!scoutfs_unmounting(sb))
		scoutfs_trans_sync(sb, 0);

	return scoutfs_net_response(sb, conn, cmd, id, 0, NULL, 0);
}

/* The client is receiving an invalidation request from the server */
static int client_lock(struct super_block *sb,
		       struct scoutfs_net_connection *conn, u8 cmd, u64 id,
@@ -356,8 +252,8 @@ static int client_greeting(struct super_block *sb,
			   void *resp, unsigned int resp_len, int error,
			   void *data)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct client_info *client = sbi->client_info;
	struct client_info *client = SCOUTFS_SB(sb)->client_info;
	struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
	struct scoutfs_net_greeting *gr = resp;
	bool new_server;
	int ret;
@@ -372,16 +268,18 @@ static int client_greeting(struct super_block *sb,
		goto out;
	}

	if (gr->fsid != cpu_to_le64(sbi->fsid)) {
		scoutfs_warn(sb, "server greeting response fsid 0x%llx did not match client fsid 0x%llx",
			     le64_to_cpu(gr->fsid), sbi->fsid);
	if (gr->fsid != super->hdr.fsid) {
		scoutfs_warn(sb, "server sent fsid 0x%llx, client has 0x%llx",
			     le64_to_cpu(gr->fsid),
			     le64_to_cpu(super->hdr.fsid));
		ret = -EINVAL;
		goto out;
	}

	if (le64_to_cpu(gr->fmt_vers) != sbi->fmt_vers) {
		scoutfs_warn(sb, "server greeting response format version %llu did not match client format version %llu",
			     le64_to_cpu(gr->fmt_vers), sbi->fmt_vers);
	if (gr->version != super->version) {
		scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
			     le64_to_cpu(gr->version),
			     le64_to_cpu(super->version));
		ret = -EINVAL;
		goto out;
	}
@@ -390,7 +288,6 @@ static int client_greeting(struct super_block *sb,
	scoutfs_net_client_greeting(sb, conn, new_server);

	client->server_term = le64_to_cpu(gr->server_term);
	client->connect_delay_jiffies = 0;
	ret = 0;
out:
	return ret;
@@ -435,25 +332,11 @@ static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
	if (ret == -ENOENT)
		ret = 0;

out:
	kfree(super);
out:
	return ret;
}

/*
 * If we're not seeing successful connections we want to back off. Each
 * connection attempt starts by setting a long connection work delay.
 * We only set a shorter delay if we see a greeting response from the
 * server. At that point we'll try to immediately reconnect if the
 * connection is broken.
 */
static void queue_connect_dwork(struct super_block *sb, struct client_info *client)
{
	if (!atomic_read(&client->shutting_down) && !scoutfs_forcing_unmount(sb))
		queue_delayed_work(client->workq, &client->connect_dwork,
				   client->connect_delay_jiffies);
}

/*
 * This work is responsible for maintaining a connection from the client
 * to the server. It's queued on mount and disconnect and we requeue
@@ -477,15 +360,13 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
						connect_dwork.work);
	struct super_block *sb = client->sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_mount_options opts;
	struct scoutfs_super_block *super = &sbi->super;
	struct mount_options *opts = &sbi->opts;
	const bool am_quorum = opts->quorum_slot_nr >= 0;
	struct scoutfs_net_greeting greet;
	struct sockaddr_in sin;
	bool am_quorum;
	int ret;

	scoutfs_options_read(sb, &opts);
	am_quorum = opts.quorum_slot_nr >= 0;

	/* can unmount once server farewell handling removes our item */
	if (client->sending_farewell &&
	    lookup_mounted_client_item(sb, sbi->rid) == 0) {
@@ -495,9 +376,6 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
		goto out;
	}

	/* always wait a bit until a greeting response sets a lower delay */
	client->connect_delay_jiffies = msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS);

	ret = scoutfs_quorum_server_sin(sb, &sin);
	if (ret < 0)
		goto out;
@@ -508,8 +386,8 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
		goto out;

	/* send a greeting to verify endpoints of each connection */
	greet.fsid = cpu_to_le64(sbi->fsid);
	greet.fmt_vers = cpu_to_le64(sbi->fmt_vers);
	greet.fsid = super->hdr.fsid;
	greet.version = super->version;
	greet.server_term = cpu_to_le64(client->server_term);
	greet.rid = cpu_to_le64(sbi->rid);
	greet.flags = 0;
@@ -525,15 +403,16 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
	if (ret)
		scoutfs_net_shutdown(sb, client->conn);
out:
	if (ret)
		queue_connect_dwork(sb, client);

	/* always have a small delay before retrying to avoid storms */
	if (ret && !atomic_read(&client->shutting_down))
		queue_delayed_work(client->workq, &client->connect_dwork,
				   msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS));
}

static scoutfs_net_request_t client_req_funcs[] = {
	[SCOUTFS_NET_CMD_SYNC_LOG_TREES]	= sync_log_trees,
	[SCOUTFS_NET_CMD_LOCK]			= client_lock,
	[SCOUTFS_NET_CMD_LOCK_RECOVER]		= client_lock_recover,
	[SCOUTFS_NET_CMD_OPEN_INO_MAP]		= client_open_ino_map,
};

/*
@@ -546,7 +425,8 @@ static void client_notify_down(struct super_block *sb,
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	queue_connect_dwork(sb, client);
	if (!atomic_read(&client->shutting_down))
		queue_delayed_work(client->workq, &client->connect_dwork, 0);
}

int scoutfs_client_setup(struct super_block *sb)
@@ -581,7 +461,7 @@ int scoutfs_client_setup(struct super_block *sb)
		goto out;
	}

	queue_connect_dwork(sb, client);
	queue_delayed_work(client->workq, &client->connect_dwork, 0);
	ret = 0;

out:
@@ -638,7 +518,7 @@ void scoutfs_client_destroy(struct super_block *sb)
	if (client == NULL)
		return;

	if (client->server_term != 0 && !scoutfs_forcing_unmount(sb)) {
	if (client->server_term != 0) {
		client->sending_farewell = true;
		ret = scoutfs_net_submit_request(sb, client->conn,
						 SCOUTFS_NET_CMD_FAREWELL,
@@ -646,8 +526,10 @@ void scoutfs_client_destroy(struct super_block *sb)
						 client_farewell_response,
						 NULL, NULL);
		if (ret == 0) {
			wait_for_completion(&client->farewell_comp);
			ret = client->farewell_error;
			ret = wait_for_completion_interruptible(
							&client->farewell_comp);
			if (ret == 0)
				ret = client->farewell_error;
		}
		if (ret) {
			scoutfs_inc_counter(sb, client_farewell_error);
@@ -671,11 +553,3 @@ void scoutfs_client_destroy(struct super_block *sb)
	kfree(client);
	sbi->client_info = NULL;
}

void scoutfs_client_net_shutdown(struct super_block *sb)
{
	struct client_info *client = SCOUTFS_SB(sb)->client_info;

	if (client && client->conn)
		scoutfs_net_shutdown(sb, client->conn);
}

@@ -10,6 +10,7 @@ int scoutfs_client_commit_log_trees(struct super_block *sb,
int scoutfs_client_get_roots(struct super_block *sb,
			     struct scoutfs_net_roots *roots);
u64 *scoutfs_client_bulk_alloc(struct super_block *sb);
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq);
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq);
int scoutfs_client_lock_request(struct super_block *sb,
				struct scoutfs_net_lock *nl);
@@ -21,21 +22,7 @@ int scoutfs_client_srch_get_compact(struct super_block *sb,
				    struct scoutfs_srch_compact *sc);
int scoutfs_client_srch_commit_compact(struct super_block *sb,
				       struct scoutfs_srch_compact *res);
int scoutfs_client_get_log_merge(struct super_block *sb,
				 struct scoutfs_log_merge_request *req);
int scoutfs_client_commit_log_merge(struct super_block *sb,
				    struct scoutfs_log_merge_complete *comp);
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
				      struct scoutfs_open_ino_map *map);
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
				struct scoutfs_open_ino_map *map);
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd);
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst);

void scoutfs_client_net_shutdown(struct super_block *sb);
int scoutfs_client_setup(struct super_block *sb);
void scoutfs_client_destroy(struct super_block *sb);

@@ -26,15 +26,15 @@
	EXPAND_COUNTER(block_cache_alloc_page_order) \
	EXPAND_COUNTER(block_cache_alloc_virt) \
	EXPAND_COUNTER(block_cache_end_io_error) \
	EXPAND_COUNTER(block_cache_isolate_removed) \
	EXPAND_COUNTER(block_cache_isolate_rotate) \
	EXPAND_COUNTER(block_cache_isolate_skip) \
	EXPAND_COUNTER(block_cache_forget) \
	EXPAND_COUNTER(block_cache_free) \
	EXPAND_COUNTER(block_cache_free_work) \
	EXPAND_COUNTER(block_cache_remove_stale) \
	EXPAND_COUNTER(block_cache_count_objects) \
	EXPAND_COUNTER(block_cache_scan_objects) \
	EXPAND_COUNTER(block_cache_shrink) \
	EXPAND_COUNTER(block_cache_shrink_next) \
	EXPAND_COUNTER(block_cache_shrink_recent) \
	EXPAND_COUNTER(block_cache_shrink_remove) \
	EXPAND_COUNTER(block_cache_shrink_restart) \
	EXPAND_COUNTER(btree_compact_values) \
	EXPAND_COUNTER(btree_compact_values_enomem) \
	EXPAND_COUNTER(btree_delete) \
@@ -44,16 +44,6 @@
	EXPAND_COUNTER(btree_insert) \
	EXPAND_COUNTER(btree_leaf_item_hash_search) \
	EXPAND_COUNTER(btree_lookup) \
	EXPAND_COUNTER(btree_merge) \
	EXPAND_COUNTER(btree_merge_alloc_low) \
	EXPAND_COUNTER(btree_merge_delete) \
	EXPAND_COUNTER(btree_merge_delta_combined) \
	EXPAND_COUNTER(btree_merge_delta_null) \
	EXPAND_COUNTER(btree_merge_dirty_limit) \
	EXPAND_COUNTER(btree_merge_drop_old) \
	EXPAND_COUNTER(btree_merge_insert) \
	EXPAND_COUNTER(btree_merge_update) \
	EXPAND_COUNTER(btree_merge_walk) \
	EXPAND_COUNTER(btree_next) \
	EXPAND_COUNTER(btree_prev) \
	EXPAND_COUNTER(btree_split) \
@@ -75,6 +65,8 @@
	EXPAND_COUNTER(data_write_begin_enobufs_retry) \
	EXPAND_COUNTER(dentry_revalidate_error) \
	EXPAND_COUNTER(dentry_revalidate_invalid) \
	EXPAND_COUNTER(dentry_revalidate_locked) \
	EXPAND_COUNTER(dentry_revalidate_orphan) \
	EXPAND_COUNTER(dentry_revalidate_rcu) \
	EXPAND_COUNTER(dentry_revalidate_root) \
	EXPAND_COUNTER(dentry_revalidate_valid) \
@@ -88,14 +80,9 @@
	EXPAND_COUNTER(forest_read_items) \
	EXPAND_COUNTER(forest_roots_next_hint) \
	EXPAND_COUNTER(forest_set_bloom_bits) \
	EXPAND_COUNTER(inode_deleted) \
	EXPAND_COUNTER(item_cache_count_objects) \
	EXPAND_COUNTER(item_cache_scan_objects) \
	EXPAND_COUNTER(item_clear_dirty) \
	EXPAND_COUNTER(item_create) \
	EXPAND_COUNTER(item_delete) \
	EXPAND_COUNTER(item_delta) \
	EXPAND_COUNTER(item_delta_written) \
	EXPAND_COUNTER(item_dirty) \
	EXPAND_COUNTER(item_invalidate) \
	EXPAND_COUNTER(item_invalidate_page) \
@@ -116,18 +103,21 @@
	EXPAND_COUNTER(item_pcpu_page_hit) \
	EXPAND_COUNTER(item_pcpu_page_miss) \
	EXPAND_COUNTER(item_pcpu_page_miss_keys) \
	EXPAND_COUNTER(item_read_pages_barrier) \
	EXPAND_COUNTER(item_read_pages_retry) \
	EXPAND_COUNTER(item_read_pages_split) \
	EXPAND_COUNTER(item_shrink_page) \
	EXPAND_COUNTER(item_shrink_page_dirty) \
	EXPAND_COUNTER(item_shrink_page_reader) \
	EXPAND_COUNTER(item_shrink_page_trylock) \
	EXPAND_COUNTER(item_update) \
	EXPAND_COUNTER(item_write_dirty) \
	EXPAND_COUNTER(lock_alloc) \
	EXPAND_COUNTER(lock_free) \
	EXPAND_COUNTER(lock_grace_extended) \
	EXPAND_COUNTER(lock_grace_set) \
	EXPAND_COUNTER(lock_grace_wait) \
	EXPAND_COUNTER(lock_grant_request) \
	EXPAND_COUNTER(lock_grant_response) \
	EXPAND_COUNTER(lock_grant_work) \
	EXPAND_COUNTER(lock_invalidate_coverage) \
	EXPAND_COUNTER(lock_invalidate_inode) \
	EXPAND_COUNTER(lock_invalidate_request) \
@@ -139,13 +129,10 @@
	EXPAND_COUNTER(lock_nonblock_eagain) \
	EXPAND_COUNTER(lock_recover_request) \
	EXPAND_COUNTER(lock_shrink_attempted) \
	EXPAND_COUNTER(lock_shrink_request_failed) \
	EXPAND_COUNTER(lock_shrink_aborted) \
	EXPAND_COUNTER(lock_shrink_work) \
	EXPAND_COUNTER(lock_unlock) \
	EXPAND_COUNTER(lock_wait) \
	EXPAND_COUNTER(log_merge_complete) \
	EXPAND_COUNTER(log_merge_no_finalized) \
	EXPAND_COUNTER(log_merge_start) \
	EXPAND_COUNTER(log_merge_wait_timeout) \
	EXPAND_COUNTER(net_dropped_response) \
	EXPAND_COUNTER(net_send_bytes) \
	EXPAND_COUNTER(net_send_error) \
@@ -156,16 +143,6 @@
	EXPAND_COUNTER(net_recv_invalid_message) \
	EXPAND_COUNTER(net_recv_messages) \
	EXPAND_COUNTER(net_unknown_request) \
	EXPAND_COUNTER(orphan_scan) \
	EXPAND_COUNTER(orphan_scan_attempts) \
	EXPAND_COUNTER(orphan_scan_cached) \
	EXPAND_COUNTER(orphan_scan_empty) \
	EXPAND_COUNTER(orphan_scan_error) \
	EXPAND_COUNTER(orphan_scan_item) \
	EXPAND_COUNTER(orphan_scan_omap_set) \
	EXPAND_COUNTER(quota_info_count_objects) \
	EXPAND_COUNTER(quota_info_scan_objects) \
	EXPAND_COUNTER(quorum_candidate_server_stopping) \
	EXPAND_COUNTER(quorum_elected) \
	EXPAND_COUNTER(quorum_fence_error) \
	EXPAND_COUNTER(quorum_fence_leader) \
@@ -176,20 +153,17 @@
	EXPAND_COUNTER(quorum_recv_resignation) \
	EXPAND_COUNTER(quorum_recv_vote) \
	EXPAND_COUNTER(quorum_send_heartbeat) \
	EXPAND_COUNTER(quorum_send_heartbeat_dropped) \
	EXPAND_COUNTER(quorum_send_resignation) \
	EXPAND_COUNTER(quorum_send_request) \
	EXPAND_COUNTER(quorum_send_vote) \
	EXPAND_COUNTER(quorum_server_shutdown) \
	EXPAND_COUNTER(quorum_term_follower) \
	EXPAND_COUNTER(reclaimed_open_logs) \
	EXPAND_COUNTER(server_commit_hold) \
	EXPAND_COUNTER(server_commit_queue) \
	EXPAND_COUNTER(server_commit_worker) \
	EXPAND_COUNTER(srch_add_entry) \
	EXPAND_COUNTER(srch_compact_dirty_block) \
	EXPAND_COUNTER(srch_compact_entry) \
	EXPAND_COUNTER(srch_compact_error) \
	EXPAND_COUNTER(srch_compact_flush) \
	EXPAND_COUNTER(srch_compact_log_page) \
	EXPAND_COUNTER(srch_compact_removed_entry) \
@@ -199,23 +173,21 @@
	EXPAND_COUNTER(srch_search_retry_empty) \
	EXPAND_COUNTER(srch_search_sorted) \
	EXPAND_COUNTER(srch_search_sorted_block) \
	EXPAND_COUNTER(srch_search_stale_eio) \
	EXPAND_COUNTER(srch_search_stale_retry) \
	EXPAND_COUNTER(srch_search_xattrs) \
	EXPAND_COUNTER(srch_read_stale) \
	EXPAND_COUNTER(statfs) \
	EXPAND_COUNTER(totl_read_copied) \
	EXPAND_COUNTER(totl_read_item) \
	EXPAND_COUNTER(trans_commit_data_alloc_low) \
	EXPAND_COUNTER(trans_commit_dirty_meta_full) \
	EXPAND_COUNTER(trans_commit_fsync) \
	EXPAND_COUNTER(trans_commit_meta_alloc_low) \
	EXPAND_COUNTER(trans_commit_sync_fs) \
	EXPAND_COUNTER(trans_commit_timer) \
	EXPAND_COUNTER(trans_commit_written) \
	EXPAND_COUNTER(wkic_count_objects) \
	EXPAND_COUNTER(wkic_scan_objects)
	EXPAND_COUNTER(trans_commit_written)

#define FIRST_COUNTER alloc_alloc_data
#define LAST_COUNTER wkic_scan_objects
#define LAST_COUNTER trans_commit_written

#undef EXPAND_COUNTER
#define EXPAND_COUNTER(which) struct percpu_counter which;
@@ -242,12 +214,12 @@ struct scoutfs_counters {
#define SCOUTFS_PCPU_COUNTER_BATCH (1 << 30)

#define scoutfs_inc_counter(sb, which) \
	percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, 1, \
				 SCOUTFS_PCPU_COUNTER_BATCH)
	__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, 1, \
			     SCOUTFS_PCPU_COUNTER_BATCH)

#define scoutfs_add_counter(sb, which, cnt) \
	percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, cnt, \
				 SCOUTFS_PCPU_COUNTER_BATCH)
	__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, cnt, \
			     SCOUTFS_PCPU_COUNTER_BATCH)
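Call sites pass a bare counter field name from the EXPAND_COUNTER list, for example:

	scoutfs_inc_counter(sb, statfs);
	scoutfs_add_counter(sb, net_send_bytes, bytes);	/* "bytes" stands in for the caller's amount */

This hunk is an API rename rather than a behavior change: __percpu_counter_add() and percpu_counter_add_batch() take the same counter, amount, and batch arguments, and the huge 1 << 30 batch keeps updates on the local cpu until a sum is explicitly requested.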
void __init scoutfs_init_counters(void);
int scoutfs_setup_counters(struct super_block *sb);

kmod/src/data.c (822 lines changed; diff suppressed because it is too large)
@@ -38,14 +38,18 @@ struct scoutfs_data_wait {
		.err = 0, \
	}

struct scoutfs_traced_extent {
	u64 iblock;
	u64 count;
	u64 blkno;
	u8 flags;
};

extern const struct address_space_operations scoutfs_file_aops;
extern const struct file_operations scoutfs_file_fops;
struct scoutfs_alloc;
struct scoutfs_block_writer;

int scoutfs_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh,
			    int create);

int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
				u64 ino, u64 iblock, u64 last, bool offline,
				struct scoutfs_lock *lock);
@@ -55,8 +59,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
				     struct scoutfs_lock *lock);
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
			     u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
			     u64 data_version);
			     u64 byte_len, struct inode *to, u64 to_off);

int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
			    u8 sef, u8 op, struct scoutfs_data_wait *ow,
@@ -82,7 +85,7 @@ void scoutfs_data_init_btrees(struct super_block *sb,
void scoutfs_data_get_btrees(struct super_block *sb,
			     struct scoutfs_log_trees *lt);
int scoutfs_data_prepare_commit(struct super_block *sb);
bool scoutfs_data_alloc_should_refill(struct super_block *sb, u64 blocks);
u64 scoutfs_data_alloc_free_bytes(struct super_block *sb);

int scoutfs_data_setup(struct super_block *sb);
void scoutfs_data_destroy(struct super_block *sb);

kmod/src/dir.c (1061 lines changed; diff suppressed because it is too large)
@@ -5,24 +5,16 @@
#include "lock.h"

extern const struct file_operations scoutfs_dir_fops;
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
extern const struct inode_operations_wrapper scoutfs_dir_iops;
#else
extern const struct inode_operations scoutfs_dir_iops;
#endif
extern const struct inode_operations scoutfs_symlink_iops;

extern const struct dentry_operations scoutfs_dentry_ops;

struct scoutfs_link_backref_entry {
	struct list_head head;
	u64 dir_ino;
	u64 dir_pos;
	u16 name_len;
	u8 d_type;
	bool last;
	struct scoutfs_dirent dent;
	/* the full name is allocated and stored in dent.name[] */
	/* the full name is allocated and stored in dent.name[0] */
};

int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
@@ -30,10 +22,14 @@ int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
void scoutfs_dir_free_backref_path(struct super_block *sb,
				   struct list_head *list);

int scoutfs_dir_add_next_linkrefs(struct super_block *sb, u64 ino, u64 dir_ino, u64 dir_pos,
				  int count, struct list_head *list);
int scoutfs_dir_add_next_linkref(struct super_block *sb, u64 ino,
				 u64 dir_ino, u64 dir_pos,
				 struct list_head *list);

int scoutfs_symlink_drop(struct super_block *sb, u64 ino,
			 struct scoutfs_lock *lock, u64 i_size);

int scoutfs_dir_init(void);
void scoutfs_dir_exit(void);

#endif

@@ -81,7 +81,7 @@ static struct dentry *scoutfs_fh_to_dentry(struct super_block *sb,
	trace_scoutfs_fh_to_dentry(sb, fh_type, sfid);

	if (scoutfs_valid_fileid(fh_type))
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino), 0, SCOUTFS_IGF_LINKED);
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino));

	return d_obtain_alias(inode);
}
@@ -100,7 +100,7 @@ static struct dentry *scoutfs_fh_to_parent(struct super_block *sb,

	if (scoutfs_valid_fileid(fh_type) &&
	    fh_type == FILEID_SCOUTFS_WITH_PARENT)
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino), 0, SCOUTFS_IGF_LINKED);
		inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino));

	return d_obtain_alias(inode);
}
@@ -114,8 +114,8 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
	int ret;
	u64 ino;

	ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), 0, 0, 1, &list);
	if (ret < 0)
	ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), 0, 0, &list);
	if (ret)
		return ERR_PTR(ret);

	ent = list_first_entry(&list, struct scoutfs_link_backref_entry, head);
@@ -123,7 +123,7 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
	scoutfs_dir_free_backref_path(sb, &list);
	trace_scoutfs_get_parent(sb, inode, ino);

	inode = scoutfs_iget(sb, ino, 0, SCOUTFS_IGF_LINKED);
	inode = scoutfs_iget(sb, ino);

	return d_obtain_alias(inode);
}
@@ -138,9 +138,9 @@ static int scoutfs_get_name(struct dentry *parent, char *name,
	LIST_HEAD(list);
	int ret;

	ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), dir_ino,
					    0, 1, &list);
	if (ret < 0)
	ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), dir_ino,
					   0, &list);
	if (ret)
		return ret;

	ret = -ENOENT;

@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/fs.h>

#include "msg.h"
#include "ext.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -39,7 +38,7 @@ static bool ext_overlap(struct scoutfs_extent *ext, u64 start, u64 len)
	return !(e_end < start || ext->start > end);
}

bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
static bool ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
{
	u64 in_end = start + len - 1;
	u64 out_end = out->start + out->len - 1;
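The extent helpers work with inclusive end offsets, which is why both ends above subtract one. Under that convention the containment check presumably reduces to two comparisons, roughly:

	/* sketch of the inclusive-end containment test; the body isn't shown in this hunk */
	return out->start <= start && in_end <= out_end;

and ext_overlap()'s return !(e_end < start || ext->start > end) is the matching interval-intersection test.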
@@ -192,9 +191,6 @@ int scoutfs_ext_insert(struct super_block *sb, struct scoutfs_ext_ops *ops,

	/* inserting extent must not overlap */
	if (found.len && ext_overlap(&ins, found.start, found.len)) {
		if (ops->insert_overlap_warn)
			scoutfs_err(sb, "inserting extent %llu.%llu overlaps existing %llu.%llu",
				    start, len, found.start, found.len);
		ret = -EINVAL;
		goto out;
	}
@@ -245,9 +241,7 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,
		goto out;

	/* removed extent must be entirely within found */
	if (!scoutfs_ext_inside(start, len, &found)) {
		scoutfs_err(sb, "error removing extent %llu.%llu, isn't inside existing %llu.%llu",
			    start, len, found.start, found.len);
	if (!ext_inside(start, len, &found)) {
		ret = -EINVAL;
		goto out;
	}
@@ -347,7 +341,7 @@ int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,

	if (ret == 0 && ext_overlap(&found, start, len)) {
		/* set extent must be entirely within found */
		if (!scoutfs_ext_inside(start, len, &found)) {
		if (!ext_inside(start, len, &found)) {
			ret = -EINVAL;
			goto out;
		}

@@ -15,8 +15,6 @@ struct scoutfs_ext_ops {
		      u64 start, u64 len, u64 map, u8 flags);
	int (*remove)(struct super_block *sb, void *arg, u64 start, u64 len,
		      u64 map, u8 flags);

	bool insert_overlap_warn;
};

bool scoutfs_ext_can_merge(struct scoutfs_extent *left,
@@ -33,6 +31,5 @@ int scoutfs_ext_alloc(struct super_block *sb, struct scoutfs_ext_ops *ops,
		      struct scoutfs_extent *ext);
int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
		    void *arg, u64 start, u64 len, u64 map, u8 flags);
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out);

#endif

kmod/src/fence.c (481 lines removed)
@@ -1,481 +0,0 @@
/*
 * Copyright (C) 2019 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <asm/barrier.h>

#include "super.h"
#include "msg.h"
#include "sysfs.h"
#include "server.h"
#include "fence.h"

/*
 * Fencing ensures that a given mount can no longer write to the
 * metadata or data devices. It's necessary to ensure that it's safe to
 * give another mount access to a resource that is currently owned by a
 * mount that has stopped responding.
 *
 * Fencing is performed in collaboration between the currently elected
 * quorum leader mount and userspace running on its host. The kernel
 * creates fencing requests as it notices that mounts have stopped
 * participating. The fence requests are published as directories in
 * sysfs. Userspace agents watch for directories, take action, and
 * write to files in the directory to indicate that the mount has been
 * fenced. Once the mount is fenced the server can reclaim the
 * resources previously held by the fenced mount.
 *
 * The fence requests contain metadata identifying the specific instance
 * of the mount that needs to be fenced. This lets a fencing agent
 * ensure that a specific mount has been fenced without necessarily
 * destroying the node that was hosting it. Maybe the node had rebooted
 * and the mount is no longer there, maybe the mount can be force
 * unmounted, maybe the node can be configured to isolate the mount from
 * the devices.
 *
 * The fencing mechanism is asynchronous and can fail but the server
 * cannot make progress until it completes. If a fence request times
 * out the server shuts down in the hope that another instance of a
 * server might have more luck fencing a non-responsive mount.
 *
 * Sources of fencing are fundamentally anchored in shared persistent
 * state. It is possible, though unlikely, that servers can fence a
 * node and then themselves fail, leaving the next server to try and
 * fence the mount again.
 */

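To make that sysfs contract concrete, here is a hedged userspace-side sketch of the acknowledgement step. Only the attribute names (fenced, error, rid, and so on) come from this file; the directory path layout and the helper name are assumptions for illustration:

	/* hypothetical fencing-agent helper, not part of the kernel module */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <errno.h>
	#include <limits.h>

	static int ack_fenced(const char *fence_dir)
	{
		char path[PATH_MAX];
		int fd;

		snprintf(path, sizeof(path), "%s/fenced", fence_dir);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -errno;
		/* any write to "fenced" marks the mount as safely fenced */
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -EIO;
		}
		return close(fd);
	}

Writing to "error" instead tells the server that fencing failed, which shuts the server down rather than letting it reclaim the mount's resources.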
struct fence_info {
	struct kset *kset;
	struct kobject fence_dir_kobj;
	struct workqueue_struct *wq;
	wait_queue_head_t waitq;
	spinlock_t lock;
	struct list_head list;
};

#define DECLARE_FENCE_INFO(sb, name) \
	struct fence_info *name = SCOUTFS_SB(sb)->fence_info

struct pending_fence {
	struct super_block *sb;
	struct scoutfs_sysfs_attrs ssa;
	struct list_head entry;
	struct timer_list timer;

	ktime_t start_kt;
	__be32 ipv4_addr;
	bool fenced;
	bool error;
	int reason;
	u64 rid;
};

#define FENCE_FROM_KOBJ(kobj) \
	container_of(SCOUTFS_SYSFS_ATTRS(kobj), struct pending_fence, ssa)
#define DECLARE_FENCE_FROM_KOBJ(name, kobj) \
	struct pending_fence *name = FENCE_FROM_KOBJ(kobj)

static void destroy_fence(struct pending_fence *fence)
{
	struct super_block *sb = fence->sb;

	scoutfs_sysfs_destroy_attrs(sb, &fence->ssa);
	del_timer_sync(&fence->timer);
	kfree(fence);
}

static ssize_t elapsed_secs_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	ktime_t now = ktime_get();
	ktime_t t = ns_to_ktime(0);

	if (ktime_after(now, fence->start_kt))
		t = ktime_sub(now, fence->start_kt);

	return snprintf(buf, PAGE_SIZE, "%llu", (long long)ktime_divns(t, NSEC_PER_SEC));
}
SCOUTFS_ATTR_RO(elapsed_secs);

static ssize_t fenced_show(struct kobject *kobj, struct kobj_attribute *attr,
			   char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%u", !!fence->fenced);
}

/*
 * any write to the fenced file from userspace indicates that the mount
 * has been safely fenced and can no longer write to the shared device.
 */
static ssize_t fenced_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	DECLARE_FENCE_INFO(fence->sb, fi);

	if (!fence->fenced) {
		del_timer_sync(&fence->timer);
		fence->fenced = true;
		wake_up(&fi->waitq);
	}

	return count;
}
SCOUTFS_ATTR_RW(fenced);

static ssize_t error_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%u", !!fence->error);
}

/*
 * The fencing agent can tell us that it was unable to fence the given
 * mount. We can't continue if the mount can't be isolated so we shut
 * down the server.
 */
static ssize_t error_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf,
			   size_t count)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	struct super_block *sb = fence->sb;
	DECLARE_FENCE_INFO(fence->sb, fi);

	if (!fence->error) {
		fence->error = true;
		scoutfs_err(sb, "error indicated by fence action for rid %016llx", fence->rid);
		wake_up(&fi->waitq);
	}

	return count;
}
SCOUTFS_ATTR_RW(error);

static ssize_t ipv4_addr_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%pI4", &fence->ipv4_addr);
}
SCOUTFS_ATTR_RO(ipv4_addr);

static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr,
			   char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);
	unsigned r = fence->reason;
	char *str = "unknown";
	static char *reasons[] = {
		[SCOUTFS_FENCE_CLIENT_RECOVERY] = "client_recovery",
		[SCOUTFS_FENCE_CLIENT_RECONNECT] = "client_reconnect",
		[SCOUTFS_FENCE_QUORUM_BLOCK_LEADER] = "quorum_block_leader",
	};

	if (r < ARRAY_SIZE(reasons) && reasons[r])
		str = reasons[r];

	return snprintf(buf, PAGE_SIZE, "%s", str);
}
SCOUTFS_ATTR_RO(reason);

static ssize_t rid_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	DECLARE_FENCE_FROM_KOBJ(fence, kobj);

	return snprintf(buf, PAGE_SIZE, "%016llx", fence->rid);
}
SCOUTFS_ATTR_RO(rid);

static struct attribute *fence_attrs[] = {
	SCOUTFS_ATTR_PTR(elapsed_secs),
	SCOUTFS_ATTR_PTR(fenced),
	SCOUTFS_ATTR_PTR(error),
	SCOUTFS_ATTR_PTR(ipv4_addr),
	SCOUTFS_ATTR_PTR(reason),
	SCOUTFS_ATTR_PTR(rid),
	NULL,
};

#define FENCE_TIMEOUT_MS (MSEC_PER_SEC * 30)

static void fence_timeout(struct timer_list *timer)
{
	struct pending_fence *fence = from_timer(fence, timer, timer);
	struct super_block *sb = fence->sb;
	DECLARE_FENCE_INFO(sb, fi);

	fence->error = true;
	scoutfs_err(sb, "fence request for rid %016llx was not serviced in %lums, raising error",
		    fence->rid, FENCE_TIMEOUT_MS);
	wake_up(&fi->waitq);
}

int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;
	int ret;

	fence = kzalloc(sizeof(struct pending_fence), GFP_NOFS);
	if (!fence) {
		ret = -ENOMEM;
		goto out;
	}

	fence->sb = sb;
	scoutfs_sysfs_init_attrs(sb, &fence->ssa);

	fence->start_kt = ktime_get();
	fence->ipv4_addr = ipv4_addr;
	fence->fenced = false;
	fence->error = false;
	fence->reason = reason;
	fence->rid = rid;

	ret = scoutfs_sysfs_create_attrs_parent(sb, &fi->kset->kobj,
						&fence->ssa, fence_attrs,
						"%016llx", rid);
	if (ret < 0) {
		kfree(fence);
		goto out;
	}

	timer_setup(&fence->timer, fence_timeout, 0);
	fence->timer.expires = jiffies + msecs_to_jiffies(FENCE_TIMEOUT_MS);
	add_timer(&fence->timer);

	spin_lock(&fi->lock);
	list_add_tail(&fence->entry, &fi->list);
	spin_unlock(&fi->lock);
out:
	return ret;
}

/*
 * Give the caller the rid of the next fence request which has been
 * fenced. This doesn't have a position from which to return the next
 * because the caller either frees the fence request it's given or shuts
 * down.
 */
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;
	int ret = -ENOENT;

	spin_lock(&fi->lock);
	list_for_each_entry(fence, &fi->list, entry) {
		if (fence->fenced || fence->error) {
			*rid = fence->rid;
			*reason = fence->reason;
			*error = fence->error;
			ret = 0;
			break;
		}
	}
	spin_unlock(&fi->lock);

	return ret;
}

int scoutfs_fence_reason_pending(struct super_block *sb, int reason)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;
	bool pending = false;

	spin_lock(&fi->lock);
	list_for_each_entry(fence, &fi->list, entry) {
		if (fence->reason == reason) {
			pending = true;
			break;
		}
	}
	spin_unlock(&fi->lock);

	return pending;
}

int scoutfs_fence_free(struct super_block *sb, u64 rid)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;
	int ret = -ENOENT;

	spin_lock(&fi->lock);
	list_for_each_entry(fence, &fi->list, entry) {
		if (fence->rid == rid) {
			list_del_init(&fence->entry);
			ret = 0;
			break;
		}
	}
	spin_unlock(&fi->lock);

	if (ret == 0) {
		destroy_fence(fence);
		wake_up(&fi->waitq);
	}

	return ret;
}

static bool all_fenced(struct fence_info *fi, bool *error)
{
	struct pending_fence *fence;
	bool all = true;

	*error = false;

	spin_lock(&fi->lock);
	list_for_each_entry(fence, &fi->list, entry) {
		if (fence->error) {
			*error = true;
			all = true;
			break;
		}
		if (!fence->fenced) {
			all = false;
			break;
		}
	}
	spin_unlock(&fi->lock);

	return all;
}

/*
 * The caller waits for all the current requests to be fenced, but not
 * necessarily reclaimed.
 */
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies)
{
	DECLARE_FENCE_INFO(sb, fi);
	bool error;
	long ret;

	ret = wait_event_timeout(fi->waitq, all_fenced(fi, &error), timeout_jiffies);
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = 0;
	else if (error)
		ret = -EIO;

	return ret;
}

/*
 * This must be called early during startup so that it is guaranteed
 * that no other subsystem will try to call fence_start while we're
 * waiting for testing fence requests to complete.
 */
int scoutfs_fence_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_mount_options opts;
	struct fence_info *fi;
	int ret;

	/* can only fence if we can be elected by quorum */
	scoutfs_options_read(sb, &opts);
	if (opts.quorum_slot_nr == -1) {
		ret = 0;
		goto out;
	}

	fi = kzalloc(sizeof(struct fence_info), GFP_KERNEL);
	if (!fi) {
		ret = -ENOMEM;
		goto out;
	}

	init_waitqueue_head(&fi->waitq);
	spin_lock_init(&fi->lock);
	INIT_LIST_HEAD(&fi->list);

	sbi->fence_info = fi;

	fi->kset = kset_create_and_add("fence", NULL, scoutfs_sysfs_sb_dir(sb));
	if (!fi->kset) {
		ret = -ENOMEM;
		goto out;
	}

	fi->wq = alloc_workqueue("scoutfs_fence",
				 WQ_UNBOUND | WQ_NON_REENTRANT, 0);
	if (!fi->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;
out:
	if (ret)
		scoutfs_fence_destroy(sb);

	return ret;
}

/*
 * Tear down all pending fence requests because the server is shutting down.
 */
void scoutfs_fence_stop(struct super_block *sb)
{
	DECLARE_FENCE_INFO(sb, fi);
	struct pending_fence *fence;

	do {
		spin_lock(&fi->lock);
		fence = list_first_entry_or_null(&fi->list, struct pending_fence, entry);
		if (fence)
			list_del_init(&fence->entry);
		spin_unlock(&fi->lock);

		if (fence) {
			destroy_fence(fence);
			wake_up(&fi->waitq);
		}
	} while (fence);
}

void scoutfs_fence_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct fence_info *fi = SCOUTFS_SB(sb)->fence_info;
	struct pending_fence *fence;
	struct pending_fence *tmp;

	if (fi) {
		if (fi->wq)
			destroy_workqueue(fi->wq);
		list_for_each_entry_safe(fence, tmp, &fi->list, entry)
			destroy_fence(fence);
		if (fi->kset)
			kset_unregister(fi->kset);
		kfree(fi);
		sbi->fence_info = NULL;
	}
}
@@ -1,20 +0,0 @@
#ifndef _SCOUTFS_FENCE_H_
#define _SCOUTFS_FENCE_H_

enum {
	SCOUTFS_FENCE_CLIENT_RECOVERY,
	SCOUTFS_FENCE_CLIENT_RECONNECT,
	SCOUTFS_FENCE_QUORUM_BLOCK_LEADER,
};

int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason);
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error);
int scoutfs_fence_reason_pending(struct super_block *sb, int reason);
int scoutfs_fence_free(struct super_block *sb, u64 rid);
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies);

int scoutfs_fence_setup(struct super_block *sb);
void scoutfs_fence_stop(struct super_block *sb);
void scoutfs_fence_destroy(struct super_block *sb);

#endif
kmod/src/file.c (176 lines changed)
@@ -27,16 +27,8 @@
#include "file.h"
#include "inode.h"
#include "per_task.h"
#include "omap.h"
#include "quota.h"

#ifdef KC_LINUX_HAVE_FOP_AIO_READ
/*
 * Start a high level file read. We check for offline extents in the
 * read region here so that we only check the extents once. We use the
 * dio count to prevent releasing while we're reading after we've
 * checked the extents.
 */
/* TODO: Direct I/O, AIO */
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
@@ -44,39 +36,37 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
	struct inode *inode = file_inode(file);
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *scoutfs_inode_lock = NULL;
	struct scoutfs_lock *inode_lock = NULL;
	SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
	DECLARE_DATA_WAIT(dw);
	int ret;

retry:
	/* protect checked extents from release */
	inode_lock(inode);
	atomic_inc(&inode->i_dio_count);
	inode_unlock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
				 SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
				 SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
	if (ret)
		goto out;

	if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
	if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
		/* protect checked extents from stage/release */
		mutex_lock(&inode->i_mutex);
		atomic_inc(&inode->i_dio_count);
		mutex_unlock(&inode->i_mutex);

		ret = scoutfs_data_wait_check_iov(inode, iov, nr_segs, pos,
						  SEF_OFFLINE,
						  SCOUTFS_IOC_DWO_READ,
						  &dw, scoutfs_inode_lock);
						  &dw, inode_lock);
		if (ret != 0)
			goto out;
	} else {
		WARN_ON_ONCE(true);
	}

	ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	inode_dio_done(inode);
	scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
	scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
	if (scoutfs_per_task_del(&si->pt_data_lock, &pt_ent))
		inode_dio_done(inode);
	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);

	if (scoutfs_data_wait_found(&dw)) {
		ret = scoutfs_data_wait(inode, &dw);
@@ -94,7 +84,7 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
	struct inode *inode = file_inode(file);
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *scoutfs_inode_lock = NULL;
	struct scoutfs_lock *inode_lock = NULL;
	SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
	DECLARE_DATA_WAIT(dw);
	int ret;
@@ -103,42 +93,34 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		return 0;

retry:
	inode_lock(inode);
	mutex_lock(&inode->i_mutex);
	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
				 SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
				 SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
	if (ret)
		goto out;

	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0)
		goto out;

	ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
	ret = scoutfs_complete_truncate(inode, inode_lock);
	if (ret)
		goto out;

	if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
	if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
		/* data_version is per inode, whole file must be online */
		ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode),
					      SEF_OFFLINE,
					      SCOUTFS_IOC_DWO_WRITE,
					      &dw, scoutfs_inode_lock);
					      &dw, inode_lock);
		if (ret != 0)
			goto out;
	}

	ret = scoutfs_quota_check_data(sb, inode);
	if (ret)
		goto out;

	/* XXX: remove SUID bit */

	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);

out:
	scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
	scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
	inode_unlock(inode);
	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
	mutex_unlock(&inode->i_mutex);

	if (scoutfs_data_wait_found(&dw)) {
		ret = scoutfs_data_wait(inode, &dw);
@@ -156,119 +138,8 @@ out:

	return ret;
}
#else
|
||||
ssize_t scoutfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
|
||||
retry:
|
||||
/* protect checked extents from release */
|
||||
inode_lock(inode);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
inode_unlock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
ret = scoutfs_data_wait_check(inode, iocb->ki_pos, iov_iter_count(to), SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_READ, &dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
} else {
|
||||
WARN_ON_ONCE(true);
|
||||
}
|
||||
|
||||
ret = generic_file_read_iter(iocb, to);
|
||||
|
||||
out:
|
||||
inode_dio_end(inode);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ssize_t scoutfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
ssize_t ret;
|
||||
|
||||
retry:
|
||||
inode_lock(inode);
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = generic_write_checks(iocb, from);
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_inode_check_retention(inode);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_quota_check_data(sb, inode);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
/* data_version is per inode, whole file must be online */
|
||||
ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode), SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_WRITE, &dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX: remove SUID bit */
|
||||
|
||||
ret = __generic_file_write_iter(iocb, from);
|
||||
|
||||
out:
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
if (ret > 0)
|
||||
ret = generic_write_sync(iocb, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
int scoutfs_permission(KC_VFS_NS_DEF
|
||||
struct inode *inode, int mask)
|
||||
int scoutfs_permission(struct inode *inode, int mask)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
@@ -282,8 +153,7 @@ int scoutfs_permission(KC_VFS_NS_DEF
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = generic_permission(KC_VFS_INIT_NS
|
||||
inode, mask);
|
||||
ret = generic_permission(inode, mask);
|
||||
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
|
||||
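Both generations of the read and write paths above share one offline-data pattern: record any offline extents in the region while holding the cluster lock, drop all locks, sleep until the region is staged, and retry from the top. A minimal sketch of that control flow using the calls from this file (pos, count, and lock stand in for the caller's arguments; the surrounding locking and the generic VFS call are elided):

	DECLARE_DATA_WAIT(dw);
	int ret;

retry:
	/* under the cluster lock, note offline extents in the region in dw */
	ret = scoutfs_data_wait_check(inode, pos, count, SEF_OFFLINE,
				      SCOUTFS_IOC_DWO_READ, &dw, lock);
	if (ret != 0)
		goto out;

	/* ... perform the generic read or write ... */

out:
	/* with locks dropped, sleep until the offline region is staged */
	if (scoutfs_data_wait_found(&dw)) {
		ret = scoutfs_data_wait(inode, &dw);
		if (ret == 0)
			goto retry;
	}
	return ret;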
@@ -1,17 +1,11 @@
#ifndef _SCOUTFS_FILE_H_
#define _SCOUTFS_FILE_H_

#ifdef KC_LINUX_HAVE_FOP_AIO_READ
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos);
ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos);
#else
ssize_t scoutfs_file_read_iter(struct kiocb *, struct iov_iter *);
ssize_t scoutfs_file_write_iter(struct kiocb *, struct iov_iter *);
#endif
int scoutfs_permission(KC_VFS_NS_DEF
		       struct inode *inode, int mask);
int scoutfs_permission(struct inode *inode, int mask);
loff_t scoutfs_file_llseek(struct file *file, loff_t offset, int whence);

#endif /* _SCOUTFS_FILE_H_ */

@@ -26,7 +26,6 @@
#include "hash.h"
#include "srch.h"
#include "counters.h"
#include "xattr.h"
#include "scoutfs_trace.h"

/*
@@ -38,9 +37,9 @@
 *
 * The log btrees are modified by multiple transactions over time so
 * there is no consistent ordering relationship between the items in
 * different btrees.  Each item in a log btree stores a seq for the
 * item.  Readers check log btrees for the most recent seq that it
 * should use.
 * different btrees.  Each item in a log btree stores a version number
 * for the item.  Readers check log btrees for the most recent version
 * that it should use.
 *
 * The item cache reads items in bulk from stable btrees, and writes a
 * transaction's worth of dirty items into the item log btree.
@@ -53,8 +52,6 @@
 */
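Because the same key can have items in several log btrees, a reader settles ties by version. A hypothetical helper sketching the comparison the comment describes, using the scoutfs_log_item_value layout from format.h (newer_liv is not a name from the source):

/* return whichever copy of an item's value header carries the greater version */
static inline struct scoutfs_log_item_value *
newer_liv(struct scoutfs_log_item_value *a, struct scoutfs_log_item_value *b)
{
	return le64_to_cpu(a->vers) >= le64_to_cpu(b->vers) ? a : b;
}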

struct forest_info {
	struct super_block *sb;

	struct mutex mutex;
	struct scoutfs_alloc *alloc;
	struct scoutfs_block_writer *wri;
@@ -63,11 +60,6 @@ struct forest_info {
	struct mutex srch_mutex;
	struct scoutfs_srch_file srch_file;
	struct scoutfs_block *srch_bl;

	struct workqueue_struct *workq;
	struct delayed_work log_merge_dwork;

	atomic64_t inode_count_delta;
};

#define DECLARE_FOREST_INFO(sb, name) \
@@ -78,6 +70,11 @@ struct forest_refs {
	struct scoutfs_block_ref logs_ref;
};

/* initialize some refs that initially aren't equal */
#define DECLARE_STALE_TRACKING_SUPER_REFS(a, b) \
	struct forest_refs a = {{cpu_to_le64(0),}}; \
	struct forest_refs b = {{cpu_to_le64(1),}}
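The unequal initializers are load-bearing: the first -ESTALE pass always sees prev_refs != refs and may fetch fresh roots, while a second -ESTALE with identical refs means the roots never advanced and the error is hard. The callers below all follow this shape:

	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);

retry:
	/* ... read from the btrees, recording the roots' refs in refs ... */
	if (ret == -ESTALE) {
		/* the same refs twice means newer roots won't arrive */
		if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
			return -EIO;
		prev_refs = refs;
		goto retry;
	}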

struct forest_bloom_nrs {
	unsigned int nrs[SCOUTFS_FOREST_BLOOM_NRS];
};
@@ -131,11 +128,11 @@ static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scout
int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
			     struct scoutfs_key *next)
{
	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
	struct scoutfs_net_roots roots;
	struct scoutfs_btree_root item_root;
	struct scoutfs_log_trees *lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
	DECLARE_SAVED_REFS(saved);
	struct scoutfs_key found;
	struct scoutfs_key ltk;
	bool checked_fs;
@@ -150,6 +147,8 @@ retry:
		goto out;

	trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
	refs.fs_ref = roots.fs_root.ref;
	refs.logs_ref = roots.logs_root.ref;

	scoutfs_key_init_log_trees(&ltk, 0, 0);
	checked_fs = false;
@@ -205,25 +204,37 @@ retry:
		}
	}

	ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.fs_root.ref, &roots.logs_root.ref);
	if (ret == -ESTALE)
	if (ret == -ESTALE) {
		if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
			return -EIO;
		prev_refs = refs;
		goto retry;
	}
out:

	return ret;
}

struct forest_read_items_data {
	int fic;
	bool is_fs;
	scoutfs_forest_item_cb cb;
	void *cb_arg;
};

static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u64 seq, u8 flags,
static int forest_read_items(struct super_block *sb, struct scoutfs_key *key,
			     void *val, int val_len, void *arg)
{
	struct forest_read_items_data *rid = arg;
	struct scoutfs_log_item_value _liv = {0,};
	struct scoutfs_log_item_value *liv = &_liv;

	return rid->cb(sb, key, seq, flags, val, val_len, rid->fic, rid->cb_arg);
	if (!rid->is_fs) {
		liv = val;
		val += sizeof(struct scoutfs_log_item_value);
		val_len -= sizeof(struct scoutfs_log_item_value);
	}

	return rid->cb(sb, key, liv, val, val_len, rid->cb_arg);
}

/*
@@ -235,48 +246,60 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u6
 * that covers all the blocks.  Any keys outside of this range can't be
 * trusted because we didn't visit all the trees to check their items.
 *
 * We return -ESTALE if we hit stale blocks to give the caller a chance
 * to reset their state and retry with a newer version of the btrees.
 * If we hit stale blocks and retry we can call the callback for
 * duplicate items.  This is harmless because the items are stable while
 * the caller holds their cluster lock and the caller has to filter out
 * item versions anyway.
 */
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
				    struct scoutfs_key *key, struct scoutfs_key *bloom_key,
				    struct scoutfs_key *start, struct scoutfs_key *end,
				    scoutfs_forest_item_cb cb, void *arg)
int scoutfs_forest_read_items(struct super_block *sb,
			      struct scoutfs_lock *lock,
			      struct scoutfs_key *key,
			      struct scoutfs_key *start,
			      struct scoutfs_key *end,
			      scoutfs_forest_item_cb cb, void *arg)
{
	DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
	struct forest_read_items_data rid = {
		.cb = cb,
		.cb_arg = arg,
	};
	struct scoutfs_log_trees lt;
	struct scoutfs_net_roots roots;
	struct scoutfs_bloom_block *bb;
	struct forest_bloom_nrs bloom;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_block *bl;
	struct scoutfs_key ltk;
	struct scoutfs_key orig_start = *start;
	struct scoutfs_key orig_end = *end;
	int ret;
	int i;

	scoutfs_inc_counter(sb, forest_read_items);
	calc_bloom_nrs(&bloom, bloom_key);
	calc_bloom_nrs(&bloom, &lock->start);

	trace_scoutfs_forest_using_roots(sb, &roots->fs_root, &roots->logs_root);
	roots = lock->roots;
retry:
	ret = scoutfs_client_get_roots(sb, &roots);
	if (ret)
		goto out;

	*start = orig_start;
	*end = orig_end;
	trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
	refs.fs_ref = roots.fs_root.ref;
	refs.logs_ref = roots.logs_root.ref;

	*start = lock->start;
	*end = lock->end;

	/* start with fs root items */
	rid.fic |= FIC_FS_ROOT;
	ret = scoutfs_btree_read_items(sb, &roots->fs_root, key, start, end,
	rid.is_fs = true;
	ret = scoutfs_btree_read_items(sb, &roots.fs_root, key, start, end,
				       forest_read_items, &rid);
	if (ret < 0)
		goto out;
	rid.fic &= ~FIC_FS_ROOT;
	rid.is_fs = false;

	scoutfs_key_init_log_trees(&ltk, 0, 0);
	for (;; scoutfs_key_inc(&ltk)) {
		ret = scoutfs_btree_next(sb, &roots->logs_root, &ltk, &iref);
		ret = scoutfs_btree_next(sb, &roots.logs_root, &ltk, &iref);
		if (ret == 0) {
			if (iref.val_len == sizeof(lt)) {
				ltk = *iref.key;
@@ -317,57 +340,30 @@ int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_r

		scoutfs_inc_counter(sb, forest_bloom_pass);

		if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED))
			rid.fic |= FIC_FINALIZED;

		ret = scoutfs_btree_read_items(sb, &lt.item_root, key, start,
					       end, forest_read_items, &rid);
		if (ret < 0)
			goto out;

		rid.fic &= ~FIC_FINALIZED;
	}

	ret = 0;
out:
	if (ret == -ESTALE) {
		if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0) {
			ret = -EIO;
			goto out;
		}
		prev_refs = refs;

		ret = scoutfs_client_get_roots(sb, &roots);
		if (ret)
			goto out;
		goto retry;
	}

	return ret;
}

int scoutfs_forest_read_items(struct super_block *sb,
			      struct scoutfs_key *key,
			      struct scoutfs_key *bloom_key,
			      struct scoutfs_key *start,
			      struct scoutfs_key *end,
			      scoutfs_forest_item_cb cb, void *arg)
{
	struct scoutfs_net_roots roots;
	int ret;

	ret = scoutfs_client_get_roots(sb, &roots);
	if (ret == 0)
		ret = scoutfs_forest_read_items_roots(sb, &roots, key, bloom_key, start, end,
						      cb, arg);
	return ret;
}

/*
 * If the items are deltas then combine the src with the destination
 * value and store the result in the destination.
 *
 * Returns:
 *  -errno: fatal error, no change
 *  0: not delta items, no change
 *  +ve: SCOUTFS_DELTA_ values indicating when dst and/or src can be dropped
 */
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
				  void *src, int src_len)
{
	if (key->sk_zone == SCOUTFS_XATTR_TOTL_ZONE)
		return scoutfs_xattr_combine_totl(dst, dst_len, src, src_len);

	return 0;
}
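Today the only delta items are the xattr totals. The real combiner lives in xattr.c and isn't part of this diff; a hypothetical sketch of what scoutfs_xattr_combine_totl might do, assuming the scoutfs_xattr_totl_val layout from format.h and the SCOUTFS_DELTA_ return codes from forest.h:

static int sketch_combine_totl(void *dst, int dst_len, void *src, int src_len)
{
	struct scoutfs_xattr_totl_val *d = dst;
	struct scoutfs_xattr_totl_val *s = src;

	if (dst_len != sizeof(*d) || src_len != sizeof(*s))
		return -EIO;

	/* sum the src total and count into the dst */
	le64_add_cpu(&d->total, le64_to_cpu(s->total));
	le64_add_cpu(&d->count, le64_to_cpu(s->count));

	/* a combined count of zero means no xattrs contribute any longer */
	return d->count == 0 ? SCOUTFS_DELTA_COMBINED_NULL :
			       SCOUTFS_DELTA_COMBINED;
}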

/*
 * Make sure that the bloom bits for the lock's start key are all set in
 * the current log's bloom block.  We record the nr of our log tree in
@@ -437,29 +433,29 @@ out:

/*
 * The caller is committing items in the transaction and has found the
 * greatest item seq amongst them.  We store it in the log_trees root
 * greatest item version amongst them.  We store it in the log_trees root
 * to send to the server.
 */
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq)
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers)
{
	DECLARE_FOREST_INFO(sb, finf);

	finf->our_log.max_item_seq = cpu_to_le64(max_seq);
	finf->our_log.max_item_vers = cpu_to_le64(max_vers);
}

/*
 * The server is calling during setup to find the greatest item seq
 * The server is calling during setup to find the greatest item version
 * amongst all the log tree roots.  They have the authoritative current
 * super.
 *
 * Item seqs are only used to compare items in log trees, not in the
 * main fs tree.  All we have to do is find the greatest seq amongst the
 * log_trees so that the core seq will have a greater seq than all the
 * items in the log_trees.
 * Item versions are only used to compare items in log trees, not in the
 * main fs tree.  All we have to do is find the greatest version amongst
 * the log_trees so that new locks will have a write_version greater
 * than all the items in the log_trees.
 */
int scoutfs_forest_get_max_seq(struct super_block *sb,
			       struct scoutfs_super_block *super,
			       u64 *seq)
int scoutfs_forest_get_max_vers(struct super_block *sb,
				struct scoutfs_super_block *super,
				u64 *vers)
{
	struct scoutfs_log_trees *lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
@@ -467,7 +463,7 @@ int scoutfs_forest_get_max_seq(struct super_block *sb,
	int ret;

	scoutfs_key_init_log_trees(&ltk, 0, 0);
	*seq = 0;
	*vers = 0;

	for (;; scoutfs_key_inc(&ltk)) {
		ret = scoutfs_btree_next(sb, &super->logs_root, &ltk, &iref);
@@ -475,7 +471,8 @@ int scoutfs_forest_get_max_seq(struct super_block *sb,
			if (iref.val_len == sizeof(struct scoutfs_log_trees)) {
				ltk = *iref.key;
				lt = iref.val;
				*seq = max(*seq, le64_to_cpu(lt->max_item_seq));
				*vers = max(*vers,
					    le64_to_cpu(lt->max_item_vers));
			} else {
				ret = -EIO;
			}
@@ -524,59 +521,6 @@ int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id)
	return ret;
}

void scoutfs_forest_inc_inode_count(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	atomic64_inc(&finf->inode_count_delta);
}

void scoutfs_forest_dec_inode_count(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	atomic64_dec(&finf->inode_count_delta);
}

/*
 * Return the total inode count from the super block and all the
 * log_btrees it references.  ESTALE from read blocks is returned to the
 * caller who is expected to retry or return hard errors.
 */
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
			       u64 *inode_count)
{
	struct scoutfs_log_trees *lt;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key key;
	int ret;

	*inode_count = le64_to_cpu(super->inode_count);

	scoutfs_key_init_log_trees(&key, 0, 0);
	for (;;) {
		ret = scoutfs_btree_next(sb, &super->logs_root, &key, &iref);
		if (ret == 0) {
			if (iref.val_len == sizeof(*lt)) {
				key = *iref.key;
				scoutfs_key_inc(&key);
				lt = iref.val;
				*inode_count += le64_to_cpu(lt->inode_count_delta);
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret < 0) {
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
	}

	return ret;
}
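As the comment says, -ESTALE escapes to the caller. A plausible caller-side loop, in the spirit of the stale-refs retries above (the retry bound is an arbitrary illustration, not code from this diff):

	int tries = 2;

	do {
		ret = scoutfs_forest_inode_count(sb, super, &inode_count);
	} while (ret == -ESTALE && tries-- > 0);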

/*
 * This is called from transactions as a new transaction opens and is
 * serialized with all writers.
@@ -597,7 +541,7 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
	memset(&finf->our_log, 0, sizeof(finf->our_log));
	finf->our_log.item_root = lt->item_root;
	finf->our_log.bloom_ref = lt->bloom_ref;
	finf->our_log.max_item_seq = lt->max_item_seq;
	finf->our_log.max_item_vers = lt->max_item_vers;
	finf->our_log.rid = lt->rid;
	finf->our_log.nr = lt->nr;
	finf->srch_file = lt->srch_file;
@@ -605,8 +549,6 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
	WARN_ON_ONCE(finf->srch_bl); /* committing should have put the block */
	finf->srch_bl = NULL;

	atomic64_set(&finf->inode_count_delta, le64_to_cpu(lt->inode_count_delta));

	trace_scoutfs_forest_init_our_log(sb, le64_to_cpu(lt->rid),
					  le64_to_cpu(lt->nr),
					  le64_to_cpu(lt->item_root.ref.blkno),
@@ -629,138 +571,15 @@ void scoutfs_forest_get_btrees(struct super_block *sb,
	lt->item_root = finf->our_log.item_root;
	lt->bloom_ref = finf->our_log.bloom_ref;
	lt->srch_file = finf->srch_file;
	lt->max_item_seq = finf->our_log.max_item_seq;
	lt->max_item_vers = finf->our_log.max_item_vers;

	scoutfs_block_put(sb, finf->srch_bl);
	finf->srch_bl = NULL;

	lt->inode_count_delta = cpu_to_le64(atomic64_read(&finf->inode_count_delta));

	trace_scoutfs_forest_prepare_commit(sb, &lt->item_root.ref,
					    &lt->bloom_ref);
}

#define LOG_MERGE_DELAY_MS (5 * MSEC_PER_SEC)

/*
 * Regularly try to get a log merge request from the server.  If we get
 * a request we walk the log_trees items to find input trees and pass
 * them to btree_merge.  All of our work is done in dirty blocks
 * allocated from available free blocks that the server gave us.  If we
 * hit an error then we drop our dirty blocks without writing them and
 * send an error flag to the server so they can reclaim our allocators
 * and ignore the rest of our work.
 */
static void scoutfs_forest_log_merge_worker(struct work_struct *work)
{
	struct forest_info *finf = container_of(work, struct forest_info,
						log_merge_dwork.work);
	struct super_block *sb = finf->sb;
	struct scoutfs_btree_root_head *rhead = NULL;
	struct scoutfs_btree_root_head *tmp;
	struct scoutfs_log_merge_complete comp;
	struct scoutfs_log_merge_request req;
	struct scoutfs_log_trees *lt;
	struct scoutfs_block_writer wri;
	struct scoutfs_alloc alloc;
	SCOUTFS_BTREE_ITEM_REF(iref);
	struct scoutfs_key next;
	struct scoutfs_key key;
	unsigned long delay;
	LIST_HEAD(inputs);
	int ret;

	ret = scoutfs_client_get_log_merge(sb, &req);
	if (ret < 0)
		goto resched;

	comp.root = req.root;
	comp.start = req.start;
	comp.end = req.end;
	comp.remain = req.end;
	comp.rid = req.rid;
	comp.seq = req.seq;
	comp.flags = 0;

	scoutfs_alloc_init(&alloc, &req.meta_avail, &req.meta_freed);
	scoutfs_block_writer_init(sb, &wri);

	/* find finalized input log trees within the input seq */
	for (scoutfs_key_init_log_trees(&key, 0, 0); ; scoutfs_key_inc(&key)) {

		if (!rhead) {
			rhead = kmalloc(sizeof(*rhead), GFP_NOFS);
			if (!rhead) {
				ret = -ENOMEM;
				goto out;
			}
		}

		ret = scoutfs_btree_next(sb, &req.logs_root, &key, &iref);
		if (ret == 0) {
			if (iref.val_len == sizeof(*lt)) {
				key = *iref.key;
				lt = iref.val;
				if (lt->item_root.ref.blkno != 0 &&
				    (le64_to_cpu(lt->flags) & SCOUTFS_LOG_TREES_FINALIZED) &&
				    (le64_to_cpu(lt->finalize_seq) < le64_to_cpu(req.input_seq))) {
					rhead->root = lt->item_root;
					list_add_tail(&rhead->head, &inputs);
					rhead = NULL;
				}
			} else {
				ret = -EIO;
			}
			scoutfs_btree_put_iref(&iref);
		}
		if (ret < 0) {
			if (ret == -ENOENT) {
				ret = 0;
				break;
			}
			goto out;
		}
	}

	/* shouldn't be possible, but it's harmless */
	if (list_empty(&inputs)) {
		ret = 0;
		goto out;
	}

	ret = scoutfs_btree_merge(sb, &alloc, &wri, &req.start, &req.end,
				  &next, &comp.root, &inputs,
				  !!(req.flags & cpu_to_le64(SCOUTFS_LOG_MERGE_REQUEST_SUBTREE)),
				  SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT, 10,
				  (2 * 1024 * 1024));
	if (ret == -ERANGE) {
		comp.remain = next;
		le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_REMAIN);
		ret = 0;
	}

out:
	scoutfs_alloc_prepare_commit(sb, &alloc, &wri);
	if (ret == 0)
		ret = scoutfs_block_writer_write(sb, &wri);
	scoutfs_block_writer_forget_all(sb, &wri);

	comp.meta_avail = alloc.avail;
	comp.meta_freed = alloc.freed;
	if (ret < 0)
		le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_ERROR);

	ret = scoutfs_client_commit_log_merge(sb, &comp);

	kfree(rhead);
	list_for_each_entry_safe(rhead, tmp, &inputs, head)
		kfree(rhead);

resched:
	delay = ret == 0 ? 0 : msecs_to_jiffies(LOG_MERGE_DELAY_MS);
	queue_delayed_work(finf->workq, &finf->log_merge_dwork, delay);
}

int scoutfs_forest_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -774,20 +593,10 @@ int scoutfs_forest_setup(struct super_block *sb)
	}

	/* the finf fields will be setup as we open a transaction */
	finf->sb = sb;
	mutex_init(&finf->mutex);
	mutex_init(&finf->srch_mutex);
	INIT_DELAYED_WORK(&finf->log_merge_dwork,
			  scoutfs_forest_log_merge_worker);

	sbi->forest_info = finf;

	finf->workq = alloc_workqueue("scoutfs_log_merge", WQ_NON_REENTRANT |
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!finf->workq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;
out:
	if (ret)
@@ -796,24 +605,6 @@ out:
	return 0;
}

void scoutfs_forest_start(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	queue_delayed_work(finf->workq, &finf->log_merge_dwork,
			   msecs_to_jiffies(LOG_MERGE_DELAY_MS));
}

void scoutfs_forest_stop(struct super_block *sb)
{
	DECLARE_FOREST_INFO(sb, finf);

	if (finf && finf->workq) {
		cancel_delayed_work_sync(&finf->log_merge_dwork);
		destroy_workqueue(finf->workq);
	}
}

void scoutfs_forest_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -821,7 +612,6 @@ void scoutfs_forest_destroy(struct super_block *sb)

	if (finf) {
		scoutfs_block_put(sb, finf->srch_bl);

		kfree(finf);
		sbi->forest_info = NULL;
	}

@@ -4,45 +4,33 @@
struct scoutfs_alloc;
struct scoutfs_block_writer;
struct scoutfs_block;
struct scoutfs_lock;

#include "btree.h"

/* caller gives an item to the callback */
enum {
	FIC_FS_ROOT = (1 << 0),
	FIC_FINALIZED = (1 << 1),
};
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb, struct scoutfs_key *key, u64 seq,
				      u8 flags, void *val, int val_len, int fic, void *arg);
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb,
				      struct scoutfs_key *key,
				      struct scoutfs_log_item_value *liv,
				      void *val, int val_len, void *arg);

int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
			     struct scoutfs_key *next);
int scoutfs_forest_read_items(struct super_block *sb,
			      struct scoutfs_lock *lock,
			      struct scoutfs_key *key,
			      struct scoutfs_key *bloom_key,
			      struct scoutfs_key *start,
			      struct scoutfs_key *end,
			      scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
				    struct scoutfs_key *key, struct scoutfs_key *bloom_key,
				    struct scoutfs_key *start, struct scoutfs_key *end,
				    scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
				  struct scoutfs_lock *lock);
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);
int scoutfs_forest_get_max_seq(struct super_block *sb,
			       struct scoutfs_super_block *super,
			       u64 *seq);
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers);
int scoutfs_forest_get_max_vers(struct super_block *sb,
				struct scoutfs_super_block *super,
				u64 *vers);
int scoutfs_forest_insert_list(struct super_block *sb,
			       struct scoutfs_btree_item_list *lst);
int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id);

void scoutfs_forest_inc_inode_count(struct super_block *sb);
void scoutfs_forest_dec_inode_count(struct super_block *sb);
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
			       u64 *inode_count);

void scoutfs_forest_init_btrees(struct super_block *sb,
				struct scoutfs_alloc *alloc,
				struct scoutfs_block_writer *wri,
@@ -50,15 +38,7 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
void scoutfs_forest_get_btrees(struct super_block *sb,
			       struct scoutfs_log_trees *lt);

/* > 0 error codes */
#define SCOUTFS_DELTA_COMBINED		1 /* src val was combined, drop src */
#define SCOUTFS_DELTA_COMBINED_NULL	2 /* combined val has no data, drop both */
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
				  void *src, int src_len);

int scoutfs_forest_setup(struct super_block *sb);
void scoutfs_forest_start(struct super_block *sb);
void scoutfs_forest_stop(struct super_block *sb);
void scoutfs_forest_destroy(struct super_block *sb);

#endif

@@ -1,20 +1,8 @@
#ifndef _SCOUTFS_FORMAT_H_
#define _SCOUTFS_FORMAT_H_

/*
 * The format version defines the format of structures on devices,
 * structures that are communicated over the wire, and the protocol
 * behind the structures.
 */
#define SCOUTFS_FORMAT_VERSION_MIN 1
#define SCOUTFS_FORMAT_VERSION_MIN_STR __stringify(SCOUTFS_FORMAT_VERSION_MIN)
#define SCOUTFS_FORMAT_VERSION_MAX 2
#define SCOUTFS_FORMAT_VERSION_MAX_STR __stringify(SCOUTFS_FORMAT_VERSION_MAX)

#define SCOUTFS_FORMAT_VERSION_FEAT_RETENTION 2
#define SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID 2
#define SCOUTFS_FORMAT_VERSION_FEAT_QUOTA 2
#define SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG 2
#define SCOUTFS_INTEROP_VERSION 0ULL
#define SCOUTFS_INTEROP_VERSION_STR __stringify(0)

/* statfs(2) f_type */
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
@@ -180,15 +168,6 @@ struct scoutfs_key {
#define sko_rid _sk_first
#define sko_ino _sk_second

/* quota rules */
#define skqr_hash _sk_second
#define skqr_coll_nr _sk_third

/* xattr totl */
#define skxt_a _sk_first
#define skxt_b _sk_second
#define skxt_c _sk_third

/* inode */
#define ski_ino _sk_first

@@ -216,16 +195,22 @@ struct scoutfs_key {
#define sklt_rid _sk_first
#define sklt_nr _sk_second

/* lock clients */
#define sklc_rid _sk_first

/* seqs */
#define skts_trans_seq _sk_first
#define skts_rid _sk_second

/* mounted clients */
#define skmc_rid _sk_first

/* free extents by blkno */
#define skfb_end _sk_first
#define skfb_len _sk_second
/* free extents by order */
#define skfo_revord _sk_first
#define skfo_end _sk_second
#define skfo_len _sk_third
#define skfb_end _sk_second
#define skfb_len _sk_third
/* free extents by len */
#define skfl_neglen _sk_second
#define skfl_blkno _sk_third

struct scoutfs_avl_root {
	__le16 node;
@@ -261,15 +246,11 @@ struct scoutfs_btree_root {
struct scoutfs_btree_item {
	struct scoutfs_avl_node node;
	struct scoutfs_key key;
	__le64 seq;
	__le16 val_off;
	__le16 val_len;
	__u8 flags;
	__u8 __pad[3];
	__u8 __pad[4];
};

#define SCOUTFS_ITEM_FLAG_DELETION (1 << 0)

struct scoutfs_btree_block {
	struct scoutfs_block_header hdr;
	struct scoutfs_avl_root item_root;
@@ -278,7 +259,7 @@ struct scoutfs_btree_block {
	__le16 mid_free_len;
	__u8 level;
	__u8 __pad[7];
	struct scoutfs_btree_item items[];
	struct scoutfs_btree_item items[0];
	/* leaf blocks have a fixed size item offset hash table at the end */
};

@@ -307,10 +288,9 @@ struct scoutfs_alloc_list_head {
	struct scoutfs_block_ref ref;
	__le64 total_nr;
	__le32 first_nr;
	__le32 flags;
	__u8 __pad[4];
};


/*
 * While the main allocator uses extent items in btree blocks, metadata
 * allocations for a single transaction are recorded in arrays in
@@ -327,7 +307,7 @@ struct scoutfs_alloc_list_block {
	struct scoutfs_block_ref next;
	__le32 start;
	__le32 nr;
	__le64 blknos[]; /* naturally aligned for sorting */
	__le64 blknos[0]; /* naturally aligned for sorting */
};

#define SCOUTFS_ALLOC_LIST_MAX_BLOCKS \
@@ -339,25 +319,17 @@ struct scoutfs_alloc_list_block {
 */
struct scoutfs_alloc_root {
	__le64 total_len;
	__le32 flags;
	__le32 _pad;
	struct scoutfs_btree_root root;
};

/* Shared by _alloc_list_head and _alloc_root */
#define SCOUTFS_ALLOC_FLAG_LOW (1U << 0)

/* types of allocators, exposed to alloc_detail ioctl */
#define SCOUTFS_ALLOC_OWNER_NONE 0
#define SCOUTFS_ALLOC_OWNER_SERVER 1
#define SCOUTFS_ALLOC_OWNER_MOUNT 2
#define SCOUTFS_ALLOC_OWNER_SRCH 3
#define SCOUTFS_ALLOC_OWNER_LOG_MERGE 4

struct scoutfs_mounted_client_btree_val {
	union scoutfs_inet_addr addr;
	__u8 flags;
	__u8 __pad[7];
};

#define SCOUTFS_MOUNTED_CLIENT_QUORUM (1 << 0)
@@ -390,7 +362,7 @@ struct scoutfs_srch_file {

struct scoutfs_srch_parent {
	struct scoutfs_block_header hdr;
	struct scoutfs_block_ref refs[];
	struct scoutfs_block_ref refs[0];
};

#define SCOUTFS_SRCH_PARENT_REFS \
@@ -405,7 +377,7 @@ struct scoutfs_srch_block {
	struct scoutfs_srch_entry tail;
	__le32 entry_nr;
	__le32 entry_bytes;
	__u8 entries[];
	__u8 entries[0];
};

/*
@@ -458,20 +430,10 @@ struct scoutfs_srch_compact {
/* client -> server: compaction failed */
#define SCOUTFS_SRCH_COMPACT_FLAG_ERROR (1 << 5)

#define SCOUTFS_DATA_ALLOC_MAX_ZONES 1024
#define SCOUTFS_DATA_ALLOC_ZONE_BYTES DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 8)
#define SCOUTFS_DATA_ALLOC_ZONE_LE64S DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 64)

/*
 * XXX I imagine we should rename these now that they've evolved to track
 * all the btrees that clients use during a transaction.  It's not just
 * about item logs, it's about clients making changes to trees.
 *
 * @get_trans_seq, @commit_trans_seq: These pair of sequence numbers
 * determine if a transaction is currently open for the mount that owns
 * the log_trees struct.  get_trans_seq is advanced by the server as the
 * transaction is opened.  The server sets commit_trans_seq equal to
 * get_ as the transaction is committed.
 */
struct scoutfs_log_trees {
	struct scoutfs_alloc_list_head meta_avail;
@@ -481,27 +443,31 @@ struct scoutfs_log_trees {
	struct scoutfs_alloc_root data_avail;
	struct scoutfs_alloc_root data_freed;
	struct scoutfs_srch_file srch_file;
	__le64 data_alloc_zone_blocks;
	__le64 data_alloc_zones[SCOUTFS_DATA_ALLOC_ZONE_LE64S];
	__le64 inode_count_delta;
	__le64 get_trans_seq;
	__le64 commit_trans_seq;
	__le64 max_item_seq;
	__le64 finalize_seq;
	__le64 max_item_vers;
	__le64 rid;
	__le64 nr;
	__le64 flags;
};

#define SCOUTFS_LOG_TREES_FINALIZED (1ULL << 0)
struct scoutfs_log_item_value {
	__le64 vers;
	__u8 flags;
	__u8 __pad[7];
	__u8 data[0];
};

/* FS items are limited by the max btree value length */
#define SCOUTFS_MAX_VAL_SIZE SCOUTFS_BTREE_MAX_VAL_LEN
/*
 * FS items are limited by the max btree value length with the log item
 * value header.
 */
#define SCOUTFS_MAX_VAL_SIZE \
	(SCOUTFS_BTREE_MAX_VAL_LEN - sizeof(struct scoutfs_log_item_value))

#define SCOUTFS_LOG_ITEM_FLAG_DELETION (1 << 0)

struct scoutfs_bloom_block {
	struct scoutfs_block_header hdr;
	__le64 total_set;
	__le64 bits[];
	__le64 bits[0];
};

/*
@@ -518,127 +484,50 @@ struct scoutfs_bloom_block {
	 member_sizeof(struct scoutfs_bloom_block, bits[0]) * 8)
#define SCOUTFS_FOREST_BLOOM_FUNC_BITS (SCOUTFS_BLOCK_LG_SHIFT + 3)

/*
 * A private server btree item which records the status of a log merge
 * operation that is in progress.
 */
struct scoutfs_log_merge_status {
	struct scoutfs_key next_range_key;
	__le64 nr_requests;
	__le64 nr_complete;
	__le64 seq;
};

/*
 * A request is sent to the client and stored in a server btree item to
 * record resources that would be reclaimed if the client failed.  It
 * has all the inputs needed for the client to perform its portion of a
 * merge.
 */
struct scoutfs_log_merge_request {
	struct scoutfs_alloc_list_head meta_avail;
	struct scoutfs_alloc_list_head meta_freed;
	struct scoutfs_btree_root logs_root;
	struct scoutfs_btree_root root;
	struct scoutfs_key start;
	struct scoutfs_key end;
	__le64 input_seq;
	__le64 rid;
	__le64 seq;
	__le64 flags;
};

/* request root is subtree of fs root at parent, restricted merging modifications */
#define SCOUTFS_LOG_MERGE_REQUEST_SUBTREE (1ULL << 0)

/*
 * The output of a client's merge of log btree items into a subtree
 * rooted at a parent in the fs_root.  The client sends it to the
 * server, who stores it in a btree item for later splicing/rebalancing.
 */
struct scoutfs_log_merge_complete {
	struct scoutfs_alloc_list_head meta_avail;
	struct scoutfs_alloc_list_head meta_freed;
	struct scoutfs_btree_root root;
	struct scoutfs_key start;
	struct scoutfs_key end;
	struct scoutfs_key remain;
	__le64 rid;
	__le64 seq;
	__le64 flags;
};

/* merge failed, ignore completion and reclaim stored request */
#define SCOUTFS_LOG_MERGE_COMP_ERROR (1ULL << 0)
/* merge didn't complete range, restart from remain */
#define SCOUTFS_LOG_MERGE_COMP_REMAIN (1ULL << 1)

/*
 * Range items record the ranges of the fs keyspace that still need to
 * be merged.  They're added as a merge starts, removed as requests are
 * sent and added back if the request didn't consume its entire range.
 */
struct scoutfs_log_merge_range {
	struct scoutfs_key start;
	struct scoutfs_key end;
};

struct scoutfs_log_merge_freeing {
	struct scoutfs_btree_root root;
	struct scoutfs_key key;
	__le64 seq;
};

/*
 * Keys are first sorted by major key zones.
 */
#define SCOUTFS_INODE_INDEX_ZONE 4
#define SCOUTFS_ORPHAN_ZONE 8
#define SCOUTFS_QUOTA_ZONE 10
#define SCOUTFS_XATTR_TOTL_ZONE 12
#define SCOUTFS_XATTR_INDX_ZONE 14
#define SCOUTFS_FS_ZONE 16
#define SCOUTFS_LOCK_ZONE 20
#define SCOUTFS_INODE_INDEX_ZONE 1
#define SCOUTFS_RID_ZONE 2
#define SCOUTFS_FS_ZONE 3
#define SCOUTFS_LOCK_ZONE 4
/* Items only stored in server btrees */
#define SCOUTFS_LOG_TREES_ZONE 24
#define SCOUTFS_MOUNTED_CLIENT_ZONE 28
#define SCOUTFS_SRCH_ZONE 32
#define SCOUTFS_FREE_EXTENT_BLKNO_ZONE 36
#define SCOUTFS_FREE_EXTENT_ORDER_ZONE 40
/* Items only stored in log merge server btrees */
#define SCOUTFS_LOG_MERGE_STATUS_ZONE 44
#define SCOUTFS_LOG_MERGE_RANGE_ZONE 48
#define SCOUTFS_LOG_MERGE_REQUEST_ZONE 52
#define SCOUTFS_LOG_MERGE_COMPLETE_ZONE 56
#define SCOUTFS_LOG_MERGE_FREEING_ZONE 60
#define SCOUTFS_LOG_TREES_ZONE 6
#define SCOUTFS_LOCK_CLIENTS_ZONE 7
#define SCOUTFS_TRANS_SEQ_ZONE 8
#define SCOUTFS_MOUNTED_CLIENT_ZONE 9
#define SCOUTFS_SRCH_ZONE 10
#define SCOUTFS_FREE_EXTENT_ZONE 11

/* inode index zone */
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 4
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 8
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 2
#define SCOUTFS_INODE_INDEX_NR 3 /* don't forget to update */

/* orphan zone, redundant type used for clarity */
#define SCOUTFS_ORPHAN_TYPE 4

/* quota zone */
#define SCOUTFS_QUOTA_RULE_TYPE 4
/* rid zone (also used in server alloc btree) */
#define SCOUTFS_ORPHAN_TYPE 1

/* fs zone */
#define SCOUTFS_INODE_TYPE 4
#define SCOUTFS_XATTR_TYPE 8
#define SCOUTFS_DIRENT_TYPE 12
#define SCOUTFS_READDIR_TYPE 16
#define SCOUTFS_LINK_BACKREF_TYPE 20
#define SCOUTFS_SYMLINK_TYPE 24
#define SCOUTFS_DATA_EXTENT_TYPE 28
#define SCOUTFS_INODE_TYPE 1
#define SCOUTFS_XATTR_TYPE 2
#define SCOUTFS_DIRENT_TYPE 3
#define SCOUTFS_READDIR_TYPE 4
#define SCOUTFS_LINK_BACKREF_TYPE 5
#define SCOUTFS_SYMLINK_TYPE 6
#define SCOUTFS_DATA_EXTENT_TYPE 7

/* lock zone, only ever found in lock ranges, never in persistent items */
#define SCOUTFS_RENAME_TYPE 4
#define SCOUTFS_RENAME_TYPE 1

/* srch zone, only in server btrees */
#define SCOUTFS_SRCH_LOG_TYPE 4
#define SCOUTFS_SRCH_BLOCKS_TYPE 8
#define SCOUTFS_SRCH_PENDING_TYPE 12
#define SCOUTFS_SRCH_BUSY_TYPE 16
#define SCOUTFS_SRCH_LOG_TYPE 1
#define SCOUTFS_SRCH_BLOCKS_TYPE 2
#define SCOUTFS_SRCH_PENDING_TYPE 3
#define SCOUTFS_SRCH_BUSY_TYPE 4

/* free extents in allocator btrees in client and server, by blkno or len */
#define SCOUTFS_FREE_EXTENT_BLKNO_TYPE 1
#define SCOUTFS_FREE_EXTENT_LEN_TYPE 2

/* file data extents have start and len in key */
struct scoutfs_data_extent_val {
@@ -660,48 +549,9 @@ struct scoutfs_xattr {
	__le16 val_len;
	__u8 name_len;
	__u8 __pad[5];
	__u8 name[];
	__u8 name[0];
};

/*
 * .totl. xattrs are mapped to items.  The dotted u64s in the xattr name
 * map to the item key.  The item value total is the sum of all the
 * xattr values.  The item value count records the number of xattrs
 * contributing to the total and is used when combining logged items to
 * determine if totals are being created or destroyed.
 */
struct scoutfs_xattr_totl_val {
	__le64 total;
	__le64 count;
};

#define SQ_RF_TOTL_COUNT (1 << 0)
#define SQ_RF__UNKNOWN (~((1 << 1) - 1))

#define SQ_NS_LITERAL 0
#define SQ_NS_PROJ 1
#define SQ_NS_UID 2
#define SQ_NS_GID 3
#define SQ_NS__NR 4
#define SQ_NS__NR_SELECT (SQ_NS__NR - 1) /* !literal */

#define SQ_NF_SELECT (1 << 0)
#define SQ_NF__UNKNOWN (~((1 << 1) - 1))

#define SQ_OP_INODE 0
#define SQ_OP_DATA 1
#define SQ_OP__NR 2

struct scoutfs_quota_rule_val {
	__le64 name_val[3];
	__le64 limit;
	__u8 prio;
	__u8 op;
	__u8 rule_flags;
	__u8 name_source[3];
	__u8 name_flags[3];
	__u8 _pad[7];
};

/* XXX does this exist upstream somewhere? */
#define member_sizeof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
@@ -725,25 +575,16 @@ struct scoutfs_quota_rule_val {
#define SCOUTFS_QUORUM_ELECT_VAR_MS 100

/*
 * Once a leader is elected they send heartbeat messages to all quorum
 * members at regular intervals to force members to wait the much longer
 * heartbeat timeout.  Once the heartbeat timeout expires without
 * receiving a heartbeat message a member will start an election.
 * Once a leader is elected they send out heartbeats at regular
 * intervals to force members to wait the much longer heartbeat timeout.
 * Once heartbeat timeout expires without receiving a heartbeat they'll
 * switch over to performing elections.
 *
 * These determine how long it could take members to notice that a
 * leader has gone silent and start to elect a new leader.  The
 * heartbeat timeout can be changed at run time by options.
 * leader has gone silent and start to elect a new leader.
 */
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
#define SCOUTFS_QUORUM_MIN_HB_TIMEO_MS (2 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_DEF_HB_TIMEO_MS (10 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_MAX_HB_TIMEO_MS (60 * MSEC_PER_SEC)

/*
 * A newly elected leader will give fencing some time before giving up and
 * shutting down.
 */
#define SCOUTFS_QUORUM_FENCE_TO_MS (15 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)
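Reading both sides of the hunk together: the main side bounds a run-time tunable heartbeat timeout between the min and max with a default, while this branch uses one fixed timeout. Either way the deadline arithmetic is the same; a sketch, where hb_timeo_ms stands for whichever constant or option applies and wq/hb_work are hypothetical names:

	/* a member declares the leader dead once this deadline passes */
	unsigned long deadline = jiffies + msecs_to_jiffies(hb_timeo_ms);

	/* the leader pushes the deadline out every heartbeat interval */
	queue_delayed_work(wq, &hb_work,
			   msecs_to_jiffies(SCOUTFS_QUORUM_HB_IVAL_MS));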

struct scoutfs_quorum_message {
	__le64 fsid;
@@ -776,76 +617,35 @@ struct scoutfs_quorum_config {
	} slots[SCOUTFS_QUORUM_MAX_SLOTS];
};

enum {
	SCOUTFS_QUORUM_EVENT_BEGIN,	/* quorum service starting up */
	SCOUTFS_QUORUM_EVENT_TERM,	/* updated persistent term */
	SCOUTFS_QUORUM_EVENT_ELECT,	/* won election */
	SCOUTFS_QUORUM_EVENT_FENCE,	/* server fenced others */
	SCOUTFS_QUORUM_EVENT_STOP,	/* server stopped */
	SCOUTFS_QUORUM_EVENT_END,	/* quorum service shutting down */
	SCOUTFS_QUORUM_EVENT_NR,
};

struct scoutfs_quorum_block {
	struct scoutfs_block_header hdr;
	__le64 write_nr;
	__le64 term;
	__le64 random_write_mark;
	__le64 flags;
	struct scoutfs_quorum_block_event {
		__le64 write_nr;
		__le64 rid;
		__le64 term;
		struct scoutfs_timespec ts;
	} events[SCOUTFS_QUORUM_EVENT_NR];
	} write, update_term, set_leader, clear_leader, fenced;
};

/*
 * Tunable options that apply to the entire system.  They can be set in
 * mkfs or in sysfs files which send an rpc to the server to make the
 * change.  The super version defines the options that exist.
 *
 * @set_bits: bits for each 64bit starting offset after set_bits
 * indicate which logical option is set.
 *
 * @data_alloc_zone_blocks: if set, the data device is logically divided
 * into contiguous zones of this many blocks.  Data allocation will try
 * and isolate allocated extents for each mount to their own zone.  The
 * zone size must be larger than the data alloc high water mark and
 * large enough such that the number of zones is kept within its static
 * limit.
 */
struct scoutfs_volume_options {
	__le64 set_bits;
	__le64 data_alloc_zone_blocks;
	__le64 __future_expansion[63];
};

#define scoutfs_volopt_nr(field) \
	((offsetof(struct scoutfs_volume_options, field) - \
	  (offsetof(struct scoutfs_volume_options, set_bits) + \
	   member_sizeof(struct scoutfs_volume_options, set_bits))) / sizeof(__le64))
#define scoutfs_volopt_bit(field) \
	(1ULL << scoutfs_volopt_nr(field))

#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR \
	scoutfs_volopt_nr(data_alloc_zone_blocks)
#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT \
	scoutfs_volopt_bit(data_alloc_zone_blocks)

#define SCOUTFS_VOLOPT_EXPANSION_BITS \
	(~(scoutfs_volopt_bit(__future_expansion) - 1))
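Worked through for the only option defined so far: data_alloc_zone_blocks sits immediately after set_bits, so the offsetof difference is 0 bytes, scoutfs_volopt_nr evaluates to 0, and the bit is 1ULL << 0. A sketch of testing and reading the option (not code from this diff):

	struct scoutfs_volume_options *volopt = &super->volopt;

	if (le64_to_cpu(volopt->set_bits) &
	    SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT) {
		u64 zone_blocks = le64_to_cpu(volopt->data_alloc_zone_blocks);
		/* ... apply the configured zone size ... */
	}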
#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)

#define SCOUTFS_FLAG_IS_META_BDEV 0x01

struct scoutfs_super_block {
	struct scoutfs_block_header hdr;
	__le64 id;
	__le64 fmt_vers;
	__le64 version;
	__le64 flags;
	__u8 uuid[SCOUTFS_UUID_BYTES];
	__le64 seq;
	__le64 next_ino;
	__le64 inode_count;
	__le64 next_trans_seq;
	__le64 total_meta_blocks;	/* both static and dynamic */
	__le64 first_meta_blkno;	/* first dynamically allocated */
	__le64 last_meta_blkno;
	__le64 total_data_blocks;
	__le64 first_data_blkno;
	__le64 last_data_blkno;
	struct scoutfs_quorum_config qconf;
	struct scoutfs_alloc_root meta_alloc[2];
	struct scoutfs_alloc_root data_alloc;
@@ -853,10 +653,10 @@ struct scoutfs_super_block {
	struct scoutfs_alloc_list_head server_meta_freed[2];
	struct scoutfs_btree_root fs_root;
	struct scoutfs_btree_root logs_root;
	struct scoutfs_btree_root log_merge;
	struct scoutfs_btree_root lock_clients;
	struct scoutfs_btree_root trans_seqs;
	struct scoutfs_btree_root mounted_clients;
	struct scoutfs_btree_root srch_root;
	struct scoutfs_volume_options volopt;
};

#define SCOUTFS_ROOT_INO 1
@@ -880,6 +680,13 @@ struct scoutfs_super_block {
 *
 * @offline_blocks: The number of fixed 4k blocks that could be made
 * online by staging.
 *
 * XXX
 *	- otime?
 *	- compat flags?
 *	- version?
 *	- generation?
 *	- be more careful with rdev?
 */
struct scoutfs_inode {
	__le64 size;
@@ -890,7 +697,6 @@ struct scoutfs_inode {
	__le64 offline_blocks;
	__le64 next_readdir_pos;
	__le64 next_xattr_id;
	__le64 version;
	__le32 nlink;
	__le32 uid;
	__le32 gid;
@@ -900,39 +706,9 @@ struct scoutfs_inode {
	struct scoutfs_timespec atime;
	struct scoutfs_timespec ctime;
	struct scoutfs_timespec mtime;
	struct scoutfs_timespec crtime;
	__le64 proj;
};

#define SCOUTFS_INODE_FMT_V1_BYTES offsetof(struct scoutfs_inode, proj)

/*
 * There are so few versions that we don't mind doing this work inline
 * so that both utils and kernel can share these.  Mounting has already
 * checked that the format version is within the supported min and max,
 * so these functions only deal with size variance within that band.
 */
/* Returns the native written inode size for the given format version, 0 for bad version */
static inline int scoutfs_inode_vers_bytes(__u64 fmt_vers)
{
	if (fmt_vers == 1)
		return SCOUTFS_INODE_FMT_V1_BYTES;
	else
		return sizeof(struct scoutfs_inode);
}
/*
 * Returns true if bytes is a valid inode size to read from the given
 * version.  The given version must be greater than the version that
 * introduced the size.
 */
static inline int scoutfs_inode_valid_vers_bytes(__u64 fmt_vers, int bytes)
{
	return (bytes == sizeof(struct scoutfs_inode) && fmt_vers == SCOUTFS_FORMAT_VERSION_MAX) ||
	       (bytes == SCOUTFS_INODE_FMT_V1_BYTES);
}
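One way a reader might use the pair of helpers: accept any size that's valid for the mounted format version, copy what was written, and leave the fields newer than that version zeroed. A sketch only; inode.c's actual reader isn't shown in this diff:

static int sketch_read_inode(__u64 fmt_vers, void *val, int val_len,
			     struct scoutfs_inode *sinode)
{
	if (!scoutfs_inode_valid_vers_bytes(fmt_vers, val_len))
		return -EIO;

	/* a v1 value stops before proj, which stays zero */
	memset(sinode, 0, sizeof(*sinode));
	memcpy(sinode, val, val_len);
	return 0;
}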
|
||||
#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
|
||||
#define SCOUTFS_INO_FLAG_RETENTION 0x2
|
||||
#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
|
||||
|
||||
#define SCOUTFS_ROOT_INO 1
|
||||
|
||||
@@ -953,7 +729,7 @@ struct scoutfs_dirent {
|
||||
__le64 pos;
|
||||
__u8 type;
|
||||
__u8 __pad[7];
|
||||
__u8 name[];
|
||||
__u8 name[0];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NAME_LEN 255
|
||||
@@ -981,7 +757,6 @@ enum scoutfs_dentry_type {
|
||||
#define SCOUTFS_XATTR_MAX_NAME_LEN 255
|
||||
#define SCOUTFS_XATTR_MAX_VAL_LEN 65535
|
||||
#define SCOUTFS_XATTR_MAX_PART_SIZE SCOUTFS_MAX_VAL_SIZE
|
||||
#define SCOUTFS_XATTR_MAX_TOTL_U64 23 /* octal U64_MAX */
|
||||
|
||||
#define SCOUTFS_XATTR_NR_PARTS(name_len, val_len) \
|
||||
DIV_ROUND_UP(sizeof(struct scoutfs_xattr) + name_len + val_len, \
|
||||
@@ -1012,7 +787,7 @@ enum scoutfs_dentry_type {
|
||||
*/
|
||||
struct scoutfs_net_greeting {
|
||||
__le64 fsid;
|
||||
__le64 fmt_vers;
|
||||
__le64 version;
|
||||
__le64 server_term;
|
||||
__le64 rid;
|
||||
__le64 flags;
|
||||
@@ -1043,6 +818,7 @@ struct scoutfs_net_greeting {
|
||||
* response messages.
|
||||
*/
|
||||
struct scoutfs_net_header {
	__le64 clock_sync_id;
	__le64 seq;
	__le64 recv_seq;
	__le64 id;
@@ -1051,7 +827,7 @@ struct scoutfs_net_header {
	__u8 flags;
	__u8 error;
	__u8 __pad[3];
	__u8 data[];
	__u8 data[0];
};

#define SCOUTFS_NET_FLAG_RESPONSE (1 << 0)
@@ -1062,21 +838,13 @@ enum scoutfs_net_cmd {
	SCOUTFS_NET_CMD_ALLOC_INODES,
	SCOUTFS_NET_CMD_GET_LOG_TREES,
	SCOUTFS_NET_CMD_COMMIT_LOG_TREES,
	SCOUTFS_NET_CMD_SYNC_LOG_TREES,
	SCOUTFS_NET_CMD_GET_ROOTS,
	SCOUTFS_NET_CMD_ADVANCE_SEQ,
	SCOUTFS_NET_CMD_GET_LAST_SEQ,
	SCOUTFS_NET_CMD_LOCK,
	SCOUTFS_NET_CMD_LOCK_RECOVER,
	SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
	SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
	SCOUTFS_NET_CMD_GET_LOG_MERGE,
	SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
	SCOUTFS_NET_CMD_OPEN_INO_MAP,
	SCOUTFS_NET_CMD_GET_VOLOPT,
	SCOUTFS_NET_CMD_SET_VOLOPT,
	SCOUTFS_NET_CMD_CLEAR_VOLOPT,
	SCOUTFS_NET_CMD_RESIZE_DEVICES,
	SCOUTFS_NET_CMD_STATFS,
	SCOUTFS_NET_CMD_FAREWELL,
	SCOUTFS_NET_CMD_UNKNOWN,
};
@@ -1091,8 +859,7 @@ enum scoutfs_net_cmd {
	EXPAND_NET_ERRNO(ENOMEM)	\
	EXPAND_NET_ERRNO(EIO)		\
	EXPAND_NET_ERRNO(ENOSPC)	\
	EXPAND_NET_ERRNO(EINVAL)	\
	EXPAND_NET_ERRNO(ENOLINK)
	EXPAND_NET_ERRNO(EINVAL)

#undef EXPAND_NET_ERRNO
#define EXPAND_NET_ERRNO(which) SCOUTFS_NET_ERR_##which,
@@ -1120,32 +887,23 @@ struct scoutfs_net_roots {
	struct scoutfs_btree_root srch_root;
};

struct scoutfs_net_resize_devices {
	__le64 new_total_meta_blocks;
	__le64 new_total_data_blocks;
};

struct scoutfs_net_statfs {
	__u8 uuid[SCOUTFS_UUID_BYTES];
	__le64 free_meta_blocks;
	__le64 total_meta_blocks;
	__le64 free_data_blocks;
	__le64 total_data_blocks;
	__le64 inode_count;
};

struct scoutfs_net_lock {
	struct scoutfs_key key;
	__le64 write_seq;
	__le64 write_version;
	__u8 old_mode;
	__u8 new_mode;
	__u8 __pad[6];
};

struct scoutfs_net_lock_grant_response {
	struct scoutfs_net_lock nl;
	struct scoutfs_net_roots roots;
};

struct scoutfs_net_lock_recover {
	__le16 nr;
	__u8 __pad[6];
	struct scoutfs_net_lock locks[];
	struct scoutfs_net_lock locks[0];
};

#define SCOUTFS_NET_LOCK_MAX_RECOVER_NR \
@@ -1160,7 +918,6 @@ enum scoutfs_lock_trace {
	SLT_INVALIDATE,
	SLT_REQUEST,
	SLT_RESPONSE,
	SLT_NR,
};

/*
@@ -1213,42 +970,4 @@ enum scoutfs_corruption_sources {

#define SC_NR_LONGS DIV_ROUND_UP(SC_NR_SOURCES, BITS_PER_LONG)

#define SCOUTFS_OPEN_INO_MAP_SHIFT 10
#define SCOUTFS_OPEN_INO_MAP_BITS (1 << SCOUTFS_OPEN_INO_MAP_SHIFT)
#define SCOUTFS_OPEN_INO_MAP_MASK (SCOUTFS_OPEN_INO_MAP_BITS - 1)
#define SCOUTFS_OPEN_INO_MAP_LE64S (SCOUTFS_OPEN_INO_MAP_BITS / 64)

/*
 * The request and response conversation is as follows:
 *
 * client[init] -> server:
 *	group_nr = G
 *	req_id = 0 (I)
 * server -> client[*]
 *	group_nr = G
 *	req_id = R
 * client[*] -> server
 *	group_nr = G (I)
 *	req_id = R
 *	bits
 * server -> client[init]
 *	group_nr = G (I)
 *	req_id = R (I)
 *	bits
 *
 * Many of the fields in individual messages are ignored ("I") because
 * the net id or the omap req_id can be used to identify the
 * conversation. We always include them on the wire to make inspected
 * messages easier to follow.
 */
struct scoutfs_open_ino_map_args {
	__le64 group_nr;
	__le64 req_id;
};

struct scoutfs_open_ino_map {
	struct scoutfs_open_ino_map_args args;
	__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
};
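
A hedged kernel-side sketch, not from the source, of how a received map
could be indexed, assuming each group_nr names a run of
SCOUTFS_OPEN_INO_MAP_BITS consecutive inode numbers:

static inline bool omap_ino_bit_set(struct scoutfs_open_ino_map *map, u64 ino)
{
	u64 bit = ino & SCOUTFS_OPEN_INO_MAP_MASK;

	/* the map only answers for inos within its group */
	if (le64_to_cpu(map->args.group_nr) != (ino >> SCOUTFS_OPEN_INO_MAP_SHIFT))
		return false;

	return !!(le64_to_cpu(map->bits[bit / 64]) & (1ULL << (bit % 64)));
}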

#endif
1292
kmod/src/inode.c
File diff suppressed because it is too large
@@ -9,8 +9,6 @@

struct scoutfs_lock;

#define SCOUTFS_INODE_NR_INDICES 2

struct scoutfs_inode_info {
	/* read or initialized for each inode instance */
	u64 ino;
@@ -21,9 +19,7 @@ struct scoutfs_inode_info {
	u64 data_version;
	u64 online_blocks;
	u64 offline_blocks;
	u64 proj;
	u32 flags;
	struct kc_timespec crtime;

	/*
	 * Protects per-inode extent items, most particularly readers
@@ -41,32 +37,23 @@ struct scoutfs_inode_info {
	 */
	struct mutex item_mutex;
	bool have_item;
	u64 item_majors[SCOUTFS_INODE_NR_INDICES];
	u32 item_minors[SCOUTFS_INODE_NR_INDICES];
	u64 item_majors[SCOUTFS_INODE_INDEX_NR];
	u32 item_minors[SCOUTFS_INODE_INDEX_NR];

	/* updated on each new lock acquisition */
	atomic64_t last_refreshed;

	/* initialized once for slab object */
	seqlock_t seqlock;
	seqcount_t seqcount;
	bool staging;	/* holder of i_mutex is staging */
	struct scoutfs_per_task pt_data_lock;
	struct scoutfs_data_waitq data_waitq;
	struct rw_semaphore xattr_rwsem;
	struct list_head writeback_entry;

	struct scoutfs_lock_coverage ino_lock_cov;

	struct list_head iput_head;
	unsigned long iput_count;
	unsigned long iput_flags;
	struct rb_node writeback_node;

	struct inode inode;
};

/* try to prune dcache aliases with queued iput */
#define SI_IPUT_FLAG_PRUNE (1 << 0)

static inline struct scoutfs_inode_info *SCOUTFS_I(struct inode *inode)
{
	return container_of(inode, struct scoutfs_inode_info, inode);
@@ -81,15 +68,11 @@ struct inode *scoutfs_alloc_inode(struct super_block *sb);
void scoutfs_destroy_inode(struct inode *inode);
int scoutfs_drop_inode(struct inode *inode);
void scoutfs_evict_inode(struct inode *inode);
void scoutfs_inode_queue_iput(struct inode *inode, unsigned long flags);
int scoutfs_orphan_inode(struct inode *inode);

#define SCOUTFS_IGF_LINKED (1 << 0) /* enoent if nlink == 0 */
struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf);
struct inode *scoutfs_ilookup_nowait(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup_nowait_nonewfree(struct super_block *sb, u64 ino);
struct inode *scoutfs_iget(struct super_block *sb, u64 ino);
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);


void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino);
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
				  u32 minor, u64 ino);
int scoutfs_inode_index_start(struct super_block *sb, u64 *seq);
@@ -99,9 +82,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
				    struct list_head *list, u64 ino,
				    umode_t mode);
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
				      struct list_head *list, u64 seq, bool allocing);
				      struct list_head *list, u64 seq);
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
				  bool set_data_seq, bool allocing);
				  bool set_data_seq);
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);

int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
@@ -109,8 +92,9 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
			       struct list_head *ind_locks);

int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret);
int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
		      u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret);
struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
				umode_t mode, dev_t rdev, u64 ino,
				struct scoutfs_lock *lock);

void scoutfs_inode_set_meta_seq(struct inode *inode);
void scoutfs_inode_set_data_seq(struct inode *inode);
@@ -121,43 +105,25 @@ u64 scoutfs_inode_meta_seq(struct inode *inode);
u64 scoutfs_inode_data_seq(struct inode *inode);
u64 scoutfs_inode_data_version(struct inode *inode);
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off);
u32 scoutfs_inode_get_flags(struct inode *inode);
void scoutfs_inode_set_flags(struct inode *inode, u32 and, u32 or);
u64 scoutfs_inode_get_proj(struct inode *inode);
void scoutfs_inode_set_proj(struct inode *inode, u64 proj);

int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock);

int scoutfs_inode_check_retention(struct inode *inode);

int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock);
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
			  int flags);
int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
		    struct kstat *stat);
#else
int scoutfs_getattr(KC_VFS_NS_DEF
		    const struct path *path, struct kstat *stat,
		    u32 request_mask, unsigned int query_flags);
#endif
int scoutfs_setattr(KC_VFS_NS_DEF
		    struct dentry *dentry, struct iattr *attr);
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);

int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
				struct scoutfs_lock *primary);
int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
				struct scoutfs_lock *primary);
void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb);
int scoutfs_scan_orphans(struct super_block *sb);

void scoutfs_inode_queue_writeback(struct inode *inode);
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);

u64 scoutfs_last_ino(struct super_block *sb);

void scoutfs_inode_exit(void);
int scoutfs_inode_init(void);

int scoutfs_inode_setup(struct super_block *sb);
void scoutfs_inode_start(struct super_block *sb);
void scoutfs_inode_orphan_stop(struct super_block *sb);
void scoutfs_inode_flush_iput(struct super_block *sb);
void scoutfs_inode_destroy(struct super_block *sb);

#endif

1002
kmod/src/ioctl.c
File diff suppressed because it is too large

486
kmod/src/ioctl.h
@@ -13,7 +13,8 @@
 * This is enforced by pahole scripting in external build environments.
 */

#define SCOUTFS_IOCTL_MAGIC 0xE8 /* arbitrarily chosen hole in ioctl-number.rst */
/* XXX I have no idea how these are chosen. */
#define SCOUTFS_IOCTL_MAGIC 's'

/*
 * Packed scoutfs keys rarely cross the ioctl boundary so we have a
@@ -87,7 +88,7 @@ enum scoutfs_ino_walk_seq_type {
 * Adds entries to the user's buffer for each inode that is found in the
 * given index between the first and last positions.
 */
#define SCOUTFS_IOC_WALK_INODES _IOW(SCOUTFS_IOCTL_MAGIC, 1, \
#define SCOUTFS_IOC_WALK_INODES _IOR(SCOUTFS_IOCTL_MAGIC, 1, \
		struct scoutfs_ioctl_walk_inodes)

/*
@@ -162,11 +163,11 @@ struct scoutfs_ioctl_ino_path_result {
	__u64 dir_pos;
	__u16 path_bytes;
	__u8 _pad[6];
	__u8 path[];
	__u8 path[0];
};

/* Get a single path from the root to the given inode number */
#define SCOUTFS_IOC_INO_PATH _IOW(SCOUTFS_IOCTL_MAGIC, 2, \
#define SCOUTFS_IOC_INO_PATH _IOR(SCOUTFS_IOCTL_MAGIC, 2, \
		struct scoutfs_ioctl_ino_path)

/*
@@ -214,16 +215,23 @@ struct scoutfs_ioctl_stage {
/*
 * Give the user inode fields that are not otherwise visible. statx()
 * isn't always available and xattrs are relatively expensive.
 *
 * @valid_bytes stores the number of bytes that are valid in the
 * structure. The caller sets this to the size of the struct that they
 * understand. The kernel then fills and copies back the min of the
 * sizes that it and the user caller understand. The user can tell if a
 * field is set if all of its bytes are within the valid_bytes that the
 * kernel set on return.
 *
 * New fields are only added to the end of the struct.
 */
struct scoutfs_ioctl_stat_more {
	__u64 valid_bytes;
	__u64 meta_seq;
	__u64 data_seq;
	__u64 data_version;
	__u64 online_blocks;
	__u64 offline_blocks;
	__u64 crtime_sec;
	__u32 crtime_nsec;
	__u8 _pad[4];
};

#define SCOUTFS_IOC_STAT_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 5, \
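
A minimal userspace sketch of the valid_bytes handshake described
above; this isn't part of the header, and it assumes the definitions in
this file are in scope:

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int print_data_version(int fd)
{
	struct scoutfs_ioctl_stat_more stm;

	memset(&stm, 0, sizeof(stm));
	stm.valid_bytes = sizeof(stm);	/* the size this caller understands */

	if (ioctl(fd, SCOUTFS_IOC_STAT_MORE, &stm) < 0)
		return -1;

	/* a field is only set if all of its bytes fit inside valid_bytes */
	if (stm.valid_bytes >= offsetof(struct scoutfs_ioctl_stat_more, data_version) +
			       sizeof(stm.data_version))
		printf("data_version %llu\n", (unsigned long long)stm.data_version);

	return 0;
}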
@@ -251,16 +259,15 @@ struct scoutfs_ioctl_data_waiting {
	__u8 _pad[6];
};

#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U8_MAX << 0)

#define SCOUTFS_IOC_DATA_WAITING _IOW(SCOUTFS_IOCTL_MAGIC, 6, \
#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
		struct scoutfs_ioctl_data_waiting)

/*
 * If i_size is set then data_version must be non-zero. If the offline
 * flag is set then i_size must be set and an offline extent will be
 * created from offset 0 to i_size. The time fields are always applied
 * to the inode.
 * created from offset 0 to i_size.
 */
struct scoutfs_ioctl_setattr_more {
	__u64 data_version;
@@ -268,12 +275,11 @@ struct scoutfs_ioctl_setattr_more {
	__u64 flags;
	__u64 ctime_sec;
	__u32 ctime_nsec;
	__u32 crtime_nsec;
	__u64 crtime_sec;
	__u8 _pad[4];
};

#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U64_MAX << 1)
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U8_MAX << 1)

#define SCOUTFS_IOC_SETATTR_MORE _IOW(SCOUTFS_IOCTL_MAGIC, 7, \
		struct scoutfs_ioctl_setattr_more)
@@ -285,8 +291,8 @@ struct scoutfs_ioctl_listxattr_hidden {
	__u32 hash_pos;
};

#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOWR(SCOUTFS_IOCTL_MAGIC, 8, \
		struct scoutfs_ioctl_listxattr_hidden)
#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOR(SCOUTFS_IOCTL_MAGIC, 8, \
		struct scoutfs_ioctl_listxattr_hidden)

/*
 * Return the inode numbers of inodes which might contain the given
@@ -339,23 +345,32 @@ struct scoutfs_ioctl_search_xattrs {
/* set in output_flags if returned inodes reached last_ino */
#define SCOUTFS_SEARCH_XATTRS_OFLAG_END (1ULL << 0)

#define SCOUTFS_IOC_SEARCH_XATTRS _IOW(SCOUTFS_IOCTL_MAGIC, 9, \
		struct scoutfs_ioctl_search_xattrs)
#define SCOUTFS_IOC_SEARCH_XATTRS _IOR(SCOUTFS_IOCTL_MAGIC, 9, \
		struct scoutfs_ioctl_search_xattrs)

/*
 * Give the user information about the filesystem.
 *
 * @valid_bytes stores the number of bytes that are valid in the
 * structure. The caller sets this to the size of the struct that they
 * understand. The kernel then fills and copies back the min of the
 * sizes that it and the user caller understand. The user can tell if a
 * field is set if all of its bytes are within the valid_bytes that the
 * kernel set on return.
 *
 * @committed_seq: All seqs up to and including this seq have been
 * committed. Can be compared with meta_seq and data_seq from inodes in
 * stat_more to discover if changes have been committed to disk.
 *
 * New fields are only added to the end of the struct.
 */
struct scoutfs_ioctl_statfs_more {
	__u64 valid_bytes;
	__u64 fsid;
	__u64 rid;
	__u64 committed_seq;
	__u64 total_meta_blocks;
	__u64 total_data_blocks;
	__u64 reserved_meta_blocks;
};

#define SCOUTFS_IOC_STATFS_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 10, \
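
A hedged sketch of using @committed_seq as the comment above suggests,
comparing it against an inode's meta_seq to ask whether that inode's
last metadata change has reached disk; not part of the header:

#include <string.h>
#include <sys/ioctl.h>

/* returns 1 if committed, 0 if still dirty, -1 on error */
static int meta_change_committed(int fd)
{
	struct scoutfs_ioctl_statfs_more sfm;
	struct scoutfs_ioctl_stat_more stm;

	memset(&sfm, 0, sizeof(sfm));
	sfm.valid_bytes = sizeof(sfm);
	memset(&stm, 0, sizeof(stm));
	stm.valid_bytes = sizeof(stm);

	if (ioctl(fd, SCOUTFS_IOC_STATFS_MORE, &sfm) < 0 ||
	    ioctl(fd, SCOUTFS_IOC_STAT_MORE, &stm) < 0)
		return -1;

	return stm.meta_seq <= sfm.committed_seq;
}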
@@ -366,22 +381,17 @@ struct scoutfs_ioctl_statfs_more {
 *
 * Find current waiters that match the inode, op, and block range to wake
 * up and return an error.
 *
 * (*) ca. v1.25 and earlier required that the data_version passed match
 * that of the waiter, but this check is removed. It was never needed
 * because no data is modified during this ioctl. Any data_version value
 * passed here has been ignored since then.
 */
struct scoutfs_ioctl_data_wait_err {
	__u64 ino;
	__u64 data_version;	/* Ignored, see above (*) */
	__u64 data_version;
	__u64 offset;
	__u64 count;
	__u64 op;
	__s64 err;
};

#define SCOUTFS_IOC_DATA_WAIT_ERR _IOW(SCOUTFS_IOCTL_MAGIC, 11, \
#define SCOUTFS_IOC_DATA_WAIT_ERR _IOR(SCOUTFS_IOCTL_MAGIC, 11, \
		struct scoutfs_ioctl_data_wait_err)


@@ -400,7 +410,7 @@ struct scoutfs_ioctl_alloc_detail_entry {
	__u8 __pad[6];
};

#define SCOUTFS_IOC_ALLOC_DETAIL _IOW(SCOUTFS_IOCTL_MAGIC, 12, \
#define SCOUTFS_IOC_ALLOC_DETAIL _IOR(SCOUTFS_IOCTL_MAGIC, 12, \
		struct scoutfs_ioctl_alloc_detail)

/*
@@ -408,13 +418,12 @@ struct scoutfs_ioctl_alloc_detail_entry {
 * on the same file system.
 *
 * from_fd specifies the source file and the ioctl is called on the
 * destination file. Both files must have write access. from_off specifies
 * the byte offset in the source, to_off is the byte offset in the
 * destination, and len is the number of bytes in the region to move. All of
 * the offsets and lengths must be in multiples of 4KB, except in the case
 * where the from_off + len ends at the i_size of the source
 * file. data_version is only used when the STAGE flag is set (see below). The
 * flags field is currently only used to optionally specify STAGE behavior.
 * destination file. Both files must have write access. from_off
 * specifies the byte offset in the source, to_off is the byte offset in
 * the destination, and len is the number of bytes in the region to
 * move. All of the offsets and lengths must be in multiples of 4KB,
 * except in the case where the from_off + len ends at the i_size of the
 * source file.
 *
 * This interface only moves extents which are block granular, it does
 * not perform RMW of sub-block byte extents and it does not overwrite
@@ -426,426 +435,33 @@ struct scoutfs_ioctl_alloc_detail_entry {
 * i_size. The i_size update will maintain final partial blocks in the
 * source.
 *
 * If the STAGE flag is not set, it will return an error if either of the
 * files has offline extents. It will return 0 when all of the extents in the
 * source region have been moved to the destination. Moving extents updates
 * the ctime, mtime, meta_seq, data_seq, and data_version fields of both the
 * source and destination inodes. If an error is returned then partial
 * It will return an error if either of the files has offline extents.
 * It will return 0 when all of the extents in the source region have
 * been moved to the destination. Moving extents updates the ctime,
 * mtime, meta_seq, data_seq, and data_version fields of both the source
 * and destination inodes. If an error is returned then partial
 * progress may have been made and inode fields may have been updated.
 *
 * If the STAGE flag is set, as above except the destination range must be
 * in an offline extent. Fields are updated only for the source inode.
 *
 * Errors specific to this interface include:
 *
 * EINVAL: from_off, len, or to_off aren't a multiple of 4KB; the source
 *	and destination files are the same inode; either the source or
 *	destination is not a regular file; the destination file has
 *	an existing overlapping extent (if the STAGE flag is not set); the
 *	destination range is not in an offline extent (if STAGE is set).
 *	an existing overlapping extent.
 * EOVERFLOW: either from_off + len or to_off + len exceeded 64bits.
 * EBADF: from_fd isn't a valid open file descriptor.
 * EXDEV: the source and destination files are in different filesystems.
 * EISDIR: either the source or destination is a directory.
 * ENODATA: either the source or destination file has offline extents and
 *	the STAGE flag is not set.
 * ESTALE: data_version does not match the destination data_version.
 * ENODATA: either the source or destination file has offline extents.
 */
#define SCOUTFS_IOC_MB_STAGE (1 << 0)
#define SCOUTFS_IOC_MB_UNKNOWN (U64_MAX << 1)

struct scoutfs_ioctl_move_blocks {
	__u64 from_fd;
	__u64 from_off;
	__u64 len;
	__u64 to_off;
	__u64 data_version;
	__u64 flags;
};

#define SCOUTFS_IOC_MOVE_BLOCKS _IOW(SCOUTFS_IOCTL_MAGIC, 13, \
#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \
		struct scoutfs_ioctl_move_blocks)
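
A hedged sketch of a plain (non-STAGE) move between two files on the
same scoutfs filesystem, following the rules above; not part of the
header:

#include <string.h>
#include <sys/ioctl.h>

/* offsets and len must be 4KB multiples; the ioctl is issued on the destination */
static int move_region(int from_fd, int to_fd, __u64 from_off, __u64 to_off, __u64 len)
{
	struct scoutfs_ioctl_move_blocks mb;

	memset(&mb, 0, sizeof(mb));
	mb.from_fd = from_fd;
	mb.from_off = from_off;
	mb.to_off = to_off;
	mb.len = len;
	/* flags left 0: no STAGE, so data_version is unused */

	return ioctl(to_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb);
}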

struct scoutfs_ioctl_resize_devices {
	__u64 new_total_meta_blocks;
	__u64 new_total_data_blocks;
};

#define SCOUTFS_IOC_RESIZE_DEVICES \
	_IOW(SCOUTFS_IOCTL_MAGIC, 14, struct scoutfs_ioctl_resize_devices)

#define SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR 3

/*
 * Copy global totals of .totl. xattr value payloads to the user. This
 * only sees xattrs which have been committed and this doesn't force
 * commits of dirty data throughout the system. This can be out of sync
 * by the amount of xattrs that can be dirty in open transactions that
 * are being built throughout the system.
 *
 * pos_name: The array name of the first total that can be returned.
 * The name is derived from the key of the xattrs that contribute to the
 * total. For xattrs with a .totl.1.2.3 key, the pos_name[] should be
 * {1, 2, 3}.
 *
 * totals_ptr: An aligned pointer to a buffer that will be filled with
 * an array of scoutfs_ioctl_xattr_total structs for each total copied.
 *
 * totals_bytes: The size of the buffer in bytes. There must be room
 * for at least one struct element so that returning 0 can promise that
 * there were no more totals to copy after the pos_name.
 *
 * The number of copied elements is returned and 0 is returned if there
 * were no more totals to copy after the pos_name.
 *
 * In addition to the usual errnos (EIO, EINVAL, EPERM, EFAULT) this
 * adds:
 *
 * EINVAL: The totals_ buffer was not aligned or was not large enough
 * for a single struct entry.
 */
struct scoutfs_ioctl_read_xattr_totals {
	__u64 pos_name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
	__u64 totals_ptr;
	__u64 totals_bytes;
};

/*
 * An individual total that is given to userspace. The total is the
 * sum of all the values in the xattr payloads matching the name. The
 * count is the number of xattrs, not number of files, contributing to
 * the total.
 */
struct scoutfs_ioctl_xattr_total {
	__u64 name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
	__u64 total;
	__u64 count;
};

#define SCOUTFS_IOC_READ_XATTR_TOTALS \
	_IOW(SCOUTFS_IOCTL_MAGIC, 15, struct scoutfs_ioctl_read_xattr_totals)
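
A hedged sketch of iterating all totals; not part of the header. It
assumes name[2] is the least significant position when advancing
pos_name one past the last returned name:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#define NR_TOTALS 64

static int dump_totals(int fd)
{
	struct scoutfs_ioctl_xattr_total tots[NR_TOTALS];
	struct scoutfs_ioctl_read_xattr_totals rxt;
	int ret;
	int i;

	memset(&rxt, 0, sizeof(rxt));	/* pos_name {0,0,0}: start at the first total */
	rxt.totals_ptr = (unsigned long)tots;
	rxt.totals_bytes = sizeof(tots);

	while ((ret = ioctl(fd, SCOUTFS_IOC_READ_XATTR_TOTALS, &rxt)) > 0) {
		for (i = 0; i < ret; i++)
			printf("%llu.%llu.%llu: total %llu count %llu\n",
			       (unsigned long long)tots[i].name[0],
			       (unsigned long long)tots[i].name[1],
			       (unsigned long long)tots[i].name[2],
			       (unsigned long long)tots[i].total,
			       (unsigned long long)tots[i].count);

		/* resume one past the last name copied, carrying upwards */
		memcpy(rxt.pos_name, tots[ret - 1].name, sizeof(rxt.pos_name));
		if (++rxt.pos_name[2] == 0 && ++rxt.pos_name[1] == 0)
			rxt.pos_name[0]++;
	}

	return ret;	/* 0 when iteration finished, negative on error */
}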

/*
 * This fills the caller's inos array with inode numbers that are in use
 * after the start ino, within an internal inode group.
 *
 * This only makes a promise about the state of the inode numbers within
 * the first and last numbers returned by one call. At one time, all of
 * those inodes were still allocated. They could have changed before
 * the call returned. And any numbers outside of the first and last
 * (or single) are undefined.
 *
 * This doesn't iterate over all allocated inodes, it only probes a
 * single group that the start inode is within. This interface was
 * first introduced to support tests that needed to find out about a
 * specific inode, while having some other similarly niche uses. It is
 * unsuitable for a consistent iteration over all the inode numbers in
 * use.
 *
 * This test of inode items doesn't serialize with the inode lifetime
 * mechanism. It only tells you the numbers of inodes that were once
 * active in the system and haven't yet been fully deleted. The inode
 * numbers returned could have been in the process of being deleted and
 * were already unreachable even before the call started.
 *
 * @start_ino: the first inode number that could be returned
 * @inos_ptr: pointer to an aligned array of 64bit inode numbers
 * @inos_bytes: the number of bytes available in the inos_ptr array
 *
 * Returns errors or the count of inode numbers returned, quite possibly
 * including 0.
 */
struct scoutfs_ioctl_get_allocated_inos {
	__u64 start_ino;
	__u64 inos_ptr;
	__u64 inos_bytes;
};

#define SCOUTFS_IOC_GET_ALLOCATED_INOS \
	_IOW(SCOUTFS_IOCTL_MAGIC, 16, struct scoutfs_ioctl_get_allocated_inos)
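
A minimal sketch of probing the group around one inode number, subject
to the caveats above; not part of the header:

#include <sys/ioctl.h>

/* returns the count of in-use inos copied into 'inos', or -1 on error */
static int probe_inode_group(int fd, __u64 start_ino, __u64 *inos, unsigned int nr)
{
	struct scoutfs_ioctl_get_allocated_inos gai = {
		.start_ino = start_ino,
		.inos_ptr = (unsigned long)inos,
		.inos_bytes = nr * sizeof(*inos),
	};

	return ioctl(fd, SCOUTFS_IOC_GET_ALLOCATED_INOS, &gai);
}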

/*
 * Get directory entries that refer to a specific inode.
 *
 * @ino: The target ino that we're finding referring entries to.
 * Constant across all the calls that make up an iteration over all the
 * inode's entries.
 *
 * @dir_ino: The inode number of a directory containing the entry to our
 * inode to search from. If this parent directory contains no more
 * entries to our inode then we'll search through other parent directory
 * inodes in inode order.
 *
 * @dir_pos: The position in the dir_ino parent directory of the entry
 * to our inode to search from. If there is no entry at this position
 * then we'll search through other entry positions in increasing order.
 * If we exhaust the parent directory then we'll search through
 * additional parent directories in inode order.
 *
 * @entries_ptr: A pointer to the buffer where found entries will be
 * stored. The pointer must be aligned to 16 bytes.
 *
 * @entries_bytes: The size of the buffer that will contain entries.
 *
 * To start iterating set the desired target ino, dir_ino to 0, dir_pos
 * to 0, and set result_ptr and _bytes to a sufficiently large buffer.
 * Each entry struct that's stored in the buffer adds some overhead so a
 * large multiple of the largest possible name is a reasonable choice.
 * (A few multiples of PATH_MAX perhaps.)
 *
 * Each call returns the total number of entries that were stored in the
 * entries buffer. Zero is returned when the search was successful and
 * no referring entries were found. The entries can be iterated over by
 * advancing each starting struct offset by the total number of bytes in
 * each entry. If the _LAST flag is set on an entry then there were no
 * more entries referring to the inode at the time of the call and
 * iteration can be stopped.
 *
 * To resume iteration set the next call's starting dir_ino and dir_pos
 * to one past the last entry seen. Increment the last entry's dir_pos,
 * and if it wrapped to 0, increment its dir_ino.
 *
 * This does not check that the caller has permission to read the
 * entries found in each containing directory. It requires
 * CAP_DAC_READ_SEARCH which bypasses path traversal permissions
 * checking.
 *
 * Entries returned by a single call can reflect any combination of
 * racing creation and removal of entries. Each entry existed at the
 * time it was read though it may have changed in the time it took to
 * return from the call. The set of entries returned may no longer
 * reflect the current set of entries and may not have existed at the
 * same time.
 *
 * This has no knowledge of the life cycle of the inode. It can return
 * 0 when there are no referring entries because either the target inode
 * doesn't exist, it is in the process of being deleted, or because it
 * is still open while being unlinked.
 *
 * On success this returns the number of entries filled in the buffer.
 * A return of 0 indicates that no entries referred to the inode.
 *
 * EINVAL is returned when there is a problem with the buffer. Either
 * it was not aligned or it was not large enough for the first entry.
 *
 * Many other errnos indicate hard failure to find the next entry.
 */
struct scoutfs_ioctl_get_referring_entries {
	__u64 ino;
	__u64 dir_ino;
	__u64 dir_pos;
	__u64 entries_ptr;
	__u64 entries_bytes;
};

/*
 * @dir_ino: The inode of the directory containing the entry.
 *
 * @dir_pos: The readdir f_pos position of the entry within the
 * directory.
 *
 * @ino: The inode number of the target of the entry.
 *
 * @flags: Flags associated with this entry.
 *
 * @d_type: Inode type as specified with DT_ enum values in readdir(3).
 *
 * @entry_bytes: The total bytes taken by the entry in memory, including
 * the name and any alignment padding. The start of a following entry
 * will be found after this number of bytes.
 *
 * @name_len: The number of bytes in the name not including the trailing
 * null, ala strlen(3).
 *
 * @name: The null terminated name of the referring entry. In the
 * struct definition this array is sized to naturally align the struct.
 * That number of padded bytes are not necessarily found in the buffer
 * returned by _get_referring_entries.
 */
struct scoutfs_ioctl_dirent {
	__u64 dir_ino;
	__u64 dir_pos;
	__u64 ino;
	__u16 entry_bytes;
	__u8 flags;
	__u8 d_type;
	__u8 name_len;
	__u8 name[3];
};

#define SCOUTFS_IOCTL_DIRENT_FLAG_LAST (1 << 0)

#define SCOUTFS_IOC_GET_REFERRING_ENTRIES \
	_IOW(SCOUTFS_IOCTL_MAGIC, 17, struct scoutfs_ioctl_get_referring_entries)
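
A hedged sketch of the iteration protocol described above, advancing
through the packed buffer by entry_bytes and resuming one past the
last entry seen; not part of the header:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#define ENT_BUF_BYTES (4 * 4096)	/* a few multiples of PATH_MAX, per the comment */

static int print_referring_entries(int fd, __u64 ino)
{
	struct scoutfs_ioctl_get_referring_entries gre;
	struct scoutfs_ioctl_dirent *dent;
	struct scoutfs_ioctl_dirent *last = NULL;
	char *buf;
	int ret;
	int i;

	buf = aligned_alloc(16, ENT_BUF_BYTES);	/* entries_ptr must be 16-byte aligned */
	if (!buf)
		return -1;

	memset(&gre, 0, sizeof(gre));	/* dir_ino = 0, dir_pos = 0 starts iteration */
	gre.ino = ino;
	gre.entries_ptr = (unsigned long)buf;
	gre.entries_bytes = ENT_BUF_BYTES;

	while ((ret = ioctl(fd, SCOUTFS_IOC_GET_REFERRING_ENTRIES, &gre)) > 0) {
		dent = (struct scoutfs_ioctl_dirent *)buf;
		for (i = 0; i < ret; i++) {
			printf("dir %llu pos %llu name %s\n",
			       (unsigned long long)dent->dir_ino,
			       (unsigned long long)dent->dir_pos,
			       (char *)dent->name);
			last = dent;
			/* the next entry starts entry_bytes after this one */
			dent = (struct scoutfs_ioctl_dirent *)
			       ((char *)dent + dent->entry_bytes);
		}

		if (last->flags & SCOUTFS_IOCTL_DIRENT_FLAG_LAST)
			break;

		/* resume one past the last entry seen */
		gre.dir_ino = last->dir_ino;
		gre.dir_pos = last->dir_pos + 1;
		if (gre.dir_pos == 0)	/* dir_pos wrapped */
			gre.dir_ino++;
	}

	free(buf);
	return ret < 0 ? -1 : 0;
}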

struct scoutfs_ioctl_inode_attr_x {
	__u64 x_mask;
	__u64 x_flags;
	__u64 meta_seq;
	__u64 data_seq;
	__u64 data_version;
	__u64 online_blocks;
	__u64 offline_blocks;
	__u64 ctime_sec;
	__u32 ctime_nsec;
	__u32 crtime_nsec;
	__u64 crtime_sec;
	__u64 size;
	__u64 bits;
	__u64 project_id;
};

/*
 * Behavioral flags set in the x_flags field. These flags don't
 * necessarily correspond to specific attributes, but instead change the
 * behaviour of a _get_ or _set_ operation.
 *
 * @SCOUTFS_IOC_IAX_F_SIZE_OFFLINE: When setting i_size, also create
 * extents which are marked offline for the region of the file from
 * offset 0 to the new set size. This can only be set when setting the
 * size and has no effect if setting the size fails.
 */
#define SCOUTFS_IOC_IAX_F_SIZE_OFFLINE (1ULL << 0)
#define SCOUTFS_IOC_IAX_F__UNKNOWN (U64_MAX << 1)

/*
 * Single-bit values stored in the @bits field. These indicate whether
 * the bit is set, or not. The main _IAX_ bits set in the mask indicate
 * whether this value bit is populated by _get or stored by _set.
 */
#define SCOUTFS_IOC_IAX_B_RETENTION (1ULL << 0)

/*
 * x_mask bits which indicate which attributes of the inode to populate
 * on return for _get or to set on the inode for _set. Each mask bit
 * corresponds to the matching named field in the attr_x struct passed
 * to the _get_ and _set_ calls.
 *
 * Each field can have different permissions or other attribute
 * requirements which can cause calls to fail. If _set_ fails then no
 * other attribute changes will have been made by the same call.
 *
 * @SCOUTFS_IOC_IAX_RETENTION: Mark a file for retention. When marked,
 * no modification can be made to the file other than changing extended
 * attributes outside the "user." prefix and clearing the retention
 * mark. This can only be set on regular files and requires root (the
 * CAP_SYS_ADMIN capability). Other attributes can be set with a
 * set_attr_x call on a retention inode as long as that call also
 * successfully clears the retention mark.
 */
#define SCOUTFS_IOC_IAX_META_SEQ	(1ULL << 0)
#define SCOUTFS_IOC_IAX_DATA_SEQ	(1ULL << 1)
#define SCOUTFS_IOC_IAX_DATA_VERSION	(1ULL << 2)
#define SCOUTFS_IOC_IAX_ONLINE_BLOCKS	(1ULL << 3)
#define SCOUTFS_IOC_IAX_OFFLINE_BLOCKS	(1ULL << 4)
#define SCOUTFS_IOC_IAX_CTIME		(1ULL << 5)
#define SCOUTFS_IOC_IAX_CRTIME		(1ULL << 6)
#define SCOUTFS_IOC_IAX_SIZE		(1ULL << 7)
#define SCOUTFS_IOC_IAX_RETENTION	(1ULL << 8)
#define SCOUTFS_IOC_IAX_PROJECT_ID	(1ULL << 9)

/* single bit attributes that are packed in the bits field as _B_ */
#define SCOUTFS_IOC_IAX__BITS (SCOUTFS_IOC_IAX_RETENTION)
/* inverse of all the bits we understand */
#define SCOUTFS_IOC_IAX__UNKNOWN (U64_MAX << 10)

#define SCOUTFS_IOC_GET_ATTR_X \
	_IOW(SCOUTFS_IOCTL_MAGIC, 18, struct scoutfs_ioctl_inode_attr_x)

#define SCOUTFS_IOC_SET_ATTR_X \
	_IOW(SCOUTFS_IOCTL_MAGIC, 19, struct scoutfs_ioctl_inode_attr_x)
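
A minimal sketch of marking a file for retention with _set_attr_x, per
the comment above (requires CAP_SYS_ADMIN); not part of the header:

#include <string.h>
#include <sys/ioctl.h>

static int set_retention(int fd)
{
	struct scoutfs_ioctl_inode_attr_x iax;

	memset(&iax, 0, sizeof(iax));
	iax.x_mask = SCOUTFS_IOC_IAX_RETENTION;	/* which attribute to set */
	iax.bits = SCOUTFS_IOC_IAX_B_RETENTION;	/* the single-bit value itself */

	return ioctl(fd, SCOUTFS_IOC_SET_ATTR_X, &iax);
}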

/*
 * (These fields are documented in the order that they're displayed by
 * the scoutfs cli utility, which matches the sort order of the rules.)
 *
 * @prio: The priority of the rule. Rules are sorted by their fields
 * with prio at the highest magnitude. When multiple rules match, the
 * rule with the highest sort order is enforced. The priority field
 * lets rules override the default field sort order.
 *
 * @name_val[3]: The three 64bit values that make up the name of the
 * totl xattr whose total will be checked against the rule's limit to
 * see if the quota rule has been exceeded. The behavior of the values
 * can be changed by their corresponding name_source and name_flags.
 *
 * @name_source[3]: The SQ_NS_ enums that control where the value comes
 * from. _LITERAL uses the value from name_val. Inode attribute
 * sources (_PROJ, _UID, _GID) are taken from the inode of the operation
 * that is being checked against the rule.
 *
 * @name_flags[3]: The SQ_NF_ enums that alter the name values. _SELECT
 * makes the rule only match if the inode attribute of the operation
 * matches the attribute value stored in name_val. This lets rules
 * match a specific value of an attribute rather than mapping all
 * attribute values to totl names.
 *
 * @op: The SQ_OP_ enums which specify the operation that can't exceed
 * the rule's limit. _INODE checks inode creation and the inode
 * attributes are taken from the inode that would be created. _DATA
 * checks file data block allocation and the inode fields come from the
 * inode that is allocating the blocks.
 *
 * @limit: The 64bit value that is checked against the totl value
 * described by the rule. If the totl value is greater than or equal to
 * this value of the matching rule then the operation will return
 * -EDQUOT.
 *
 * @rule_flags: SQ_RF_TOTL_COUNT indicates that the rule's limit should
 * be checked against the number of xattrs contributing to a totl value
 * instead of the sum of the xattrs.
 */
struct scoutfs_ioctl_quota_rule {
	__u64 name_val[3];
	__u64 limit;
	__u8 prio;
	__u8 op;
	__u8 rule_flags;
	__u8 name_source[3];
	__u8 name_flags[3];
	__u8 _pad[7];
};

struct scoutfs_ioctl_get_quota_rules {
	__u64 iterator[2];
	__u64 rules_ptr;
	__u64 rules_nr;
};

/*
 * Rules are uniquely identified by their non-padded fields. Addition will fail
 * with -EEXIST if the specified rule already exists, and deletion must find a rule
 * with all matching fields to delete.
 */
#define SCOUTFS_IOC_GET_QUOTA_RULES \
	_IOR(SCOUTFS_IOCTL_MAGIC, 20, struct scoutfs_ioctl_get_quota_rules)
#define SCOUTFS_IOC_ADD_QUOTA_RULE \
	_IOW(SCOUTFS_IOCTL_MAGIC, 21, struct scoutfs_ioctl_quota_rule)
#define SCOUTFS_IOC_DEL_QUOTA_RULE \
	_IOW(SCOUTFS_IOCTL_MAGIC, 22, struct scoutfs_ioctl_quota_rule)
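
A hedged sketch of adding a per-uid data allocation rule. SQ_NS_UID
and SQ_OP_DATA are assumed spellings of the SQ_NS_/SQ_OP_ enum values
named in the comment above; their real definitions aren't visible in
this hunk, so treat this as illustrative only:

#include <string.h>
#include <sys/ioctl.h>

static int add_uid_data_limit(int fd, __u64 limit)
{
	struct scoutfs_ioctl_quota_rule rule;

	memset(&rule, 0, sizeof(rule));
	rule.op = SQ_OP_DATA;			/* checked on data block allocation */
	rule.limit = limit;
	rule.name_source[0] = SQ_NS_UID;	/* first name value taken from the inode's uid */
	/* remaining name slots stay literal zeros from the memset */

	return ioctl(fd, SCOUTFS_IOC_ADD_QUOTA_RULE, &rule);
}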

/*
 * Inodes can be indexed in a global key space at a position determined
 * by a .indx. tagged xattr. The xattr name specifies the two index
 * position values, with major having the more significant comparison
 * order.
 */
struct scoutfs_ioctl_xattr_index_entry {
	__u64 minor;
	__u64 ino;
	__u8 major;
	__u8 _pad[7];
};

struct scoutfs_ioctl_read_xattr_index {
	__u64 flags;
	struct scoutfs_ioctl_xattr_index_entry first;
	struct scoutfs_ioctl_xattr_index_entry last;
	__u64 entries_ptr;
	__u64 entries_nr;
};

#define SCOUTFS_IOC_READ_XATTR_INDEX \
	_IOR(SCOUTFS_IOCTL_MAGIC, 23, struct scoutfs_ioctl_read_xattr_index)
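
A hedged sketch of reading every index entry under one major value;
the returned-count convention is assumed by analogy with the other
iterating ioctls above, and this isn't part of the header:

#include <string.h>
#include <sys/ioctl.h>

#define NR_INDEX_ENTRIES 128

static int read_major(int fd, __u8 major, struct scoutfs_ioctl_xattr_index_entry *ents)
{
	struct scoutfs_ioctl_read_xattr_index rxi;

	memset(&rxi, 0, sizeof(rxi));
	rxi.first.major = major;	/* first.minor and first.ino start at 0 */
	rxi.last.major = major;
	rxi.last.minor = ~0ULL;
	rxi.last.ino = ~0ULL;
	rxi.entries_ptr = (unsigned long)ents;
	rxi.entries_nr = NR_INDEX_ENTRIES;

	return ioctl(fd, SCOUTFS_IOC_READ_XATTR_INDEX, &rxi);
}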

#endif

568
kmod/src/item.c
File diff suppressed because it is too large
@@ -3,8 +3,6 @@

int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
				     void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
			      void *val, int val_len,
			      struct scoutfs_lock *lock);
@@ -17,15 +15,14 @@ int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
			      void *val, int val_len,
			      struct scoutfs_lock *lock, struct scoutfs_lock *primary);
			      struct scoutfs_lock *lock);
int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
		       void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delete(struct super_block *sb, struct scoutfs_key *key,
			struct scoutfs_lock *lock);
int scoutfs_item_delete_force(struct super_block *sb, struct scoutfs_key *key,
			      struct scoutfs_lock *lock, struct scoutfs_lock *primary);
int scoutfs_item_delete_force(struct super_block *sb,
			      struct scoutfs_key *key,
			      struct scoutfs_lock *lock);

u64 scoutfs_item_dirty_pages(struct super_block *sb);
int scoutfs_item_write_dirty(struct super_block *sb);

@@ -1,149 +0,0 @@

#include <linux/uio.h>

#include "kernelcompat.h"

#ifdef KC_SHRINKER_SHRINK
#include <linux/shrinker.h>
/*
 * If a target doesn't have that .{count,scan}_objects() interface then
 * we have a .shrink() helper that performs the shrink work in terms of
 * count/scan.
 */
int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kc_shrinker_wrapper *wrapper = container_of(shrink, struct kc_shrinker_wrapper, shrink);
	unsigned long nr;
	unsigned long rc;

	if (sc->nr_to_scan != 0) {
		rc = wrapper->scan_objects(shrink, sc);
		/* translate magic values to the equivalent for older kernels */
		if (rc == SHRINK_STOP)
			return -1;
		else if (rc == SHRINK_EMPTY)
			return 0;
	}

	nr = wrapper->count_objects(shrink, sc);

	return min_t(unsigned long, nr, INT_MAX);
}
#endif

#ifndef KC_CURRENT_TIME_INODE
struct timespec64 kc_current_time(struct inode *inode)
{
	struct timespec64 now;
	unsigned gran;

	getnstimeofday64(&now);

	if (unlikely(!inode->i_sb)) {
		WARN(1, "current_time() called with uninitialized super_block in the inode");
		return now;
	}

	gran = inode->i_sb->s_time_gran;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1) {
		/* nothing */
	} else if (gran == NSEC_PER_SEC) {
		now.tv_nsec = 0;
	} else if (gran > 1 && gran < NSEC_PER_SEC) {
		now.tv_nsec -= now.tv_nsec % gran;
	} else {
		WARN(1, "illegal file time granularity: %u", gran);
	}

	return now;
}
#endif

#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
ssize_t
kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, loff_t *ppos,
			       size_t count, ssize_t written)
{
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, WRITE, iov, nr_segs, count);
	status = kc_generic_perform_write(iocb, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
	}

	return written ? written : status;
}
#endif

#include <linux/list_lru.h>

#ifdef KC_LIST_LRU_WALK_CB_ITEM_LOCK
static enum lru_status kc_isolate(struct list_head *item, spinlock_t *lock, void *cb_arg)
{
	struct kc_isolate_args *args = cb_arg;

	/* isolate doesn't use list, nr_items updated in caller */
	return args->isolate(item, NULL, args->cb_arg);
}

unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
}

unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
}
#endif

#ifdef KC_LIST_LRU_WALK_CB_LIST_LOCK
static enum lru_status kc_isolate(struct list_head *item, struct list_lru_one *list,
				  spinlock_t *lock, void *cb_arg)
{
	struct kc_isolate_args *args = cb_arg;

	return args->isolate(item, list, args->cb_arg);
}

unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
}
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
}

#endif
@@ -1,491 +1,48 @@
#ifndef _SCOUTFS_KERNELCOMPAT_H_
#define _SCOUTFS_KERNELCOMPAT_H_

#include <linux/kernel.h>
#ifndef KC_ITERATE_DIR_CONTEXT
#include <linux/fs.h>
typedef filldir_t kc_readdir_ctx_t;
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, dirent, ctx)
#define KC_FOP_READDIR readdir
#define kc_readdir_pos(filp, ctx) (filp)->f_pos
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, dirent, ctx)
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
	(ctx(dirent, name, name_len, pos, ino, dt) == 0)
#else
typedef struct dir_context * kc_readdir_ctx_t;
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, ctx)
#define KC_FOP_READDIR iterate
#define kc_readdir_pos(filp, ctx) (ctx)->pos
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, ctx)
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
	dir_emit(ctx, name, name_len, ino, dt)
#endif

#ifndef KC_DIR_EMIT_DOTS
/*
 * v4.15-rc3-4-gae5e165d855d
 *
 * new API for handling inode->i_version. This forces us to
 * include this API where we need. We include it here for
 * convenience instead of where it's needed.
 * Kernels before ->iterate don't have dir_emit_dots so we give them
 * one that works with the ->readdir() filldir() method.
 */
#ifdef KC_NEED_LINUX_IVERSION_H
#include <linux/iversion.h>
#else
/*
 * Kernels before above version will need to fall back to
 * manipulating inode->i_version as previous with degraded
 * methods.
 */
#define inode_set_iversion_queried(inode, val)	\
do {						\
	(inode)->i_version = val;		\
} while (0)
#define inode_peek_iversion(inode)		\
({						\
	(inode)->i_version;			\
})
#endif

#ifdef KC_POSIX_ACL_VALID_USER_NS
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(user_ns, acl)
#else
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(acl)
#endif

/*
 * v3.6-rc1-24-gdbf2576e37da
 *
 * All workqueues are now non-reentrant, and the bit flag is removed
 * shortly after its uses were removed.
 */
#ifndef WQ_NON_REENTRANT
#define WQ_NON_REENTRANT 0
#endif

/*
 * v3.18-rc2-19-gb5ae6b15bd73
 *
 * Folds d_materialise_unique into d_splice_alias. Note reversal
 * of arguments (Also note Documentation/filesystems/porting.rst)
 */
#ifndef KC_D_MATERIALISE_UNIQUE
#define d_materialise_unique(dentry, inode) d_splice_alias(inode, dentry)
#endif

/*
 * v4.8-rc1-29-g31051c85b5e2
 *
 * fall back to inode_change_ok() if setattr_prepare() isn't available
 */
#ifndef KC_SETATTR_PREPARE
#define setattr_prepare(dentry, attr) inode_change_ok(d_inode(dentry), attr)
#endif

#ifndef KC___POSIX_ACL_CREATE
#define __posix_acl_create posix_acl_create
#define __posix_acl_chmod posix_acl_chmod
#endif

#ifndef KC_PERCPU_COUNTER_ADD_BATCH
#define percpu_counter_add_batch __percpu_counter_add
#endif

#ifndef KC_MEMALLOC_NOFS_SAVE
#define memalloc_nofs_save memalloc_noio_save
#define memalloc_nofs_restore memalloc_noio_restore
#endif

#ifdef KC_BIO_BI_OPF
#define kc_bio_get_opf(bio)		\
({					\
	(bio)->bi_opf;			\
})
#define kc_bio_set_opf(bio, opf)	\
do {					\
	(bio)->bi_opf = opf;		\
} while (0)
#define kc_bio_set_sector(bio, sect)	\
do {					\
	(bio)->bi_iter.bi_sector = sect;\
} while (0)
#define kc_submit_bio(bio) submit_bio(bio)
#else
#define kc_bio_get_opf(bio)		\
({					\
	(bio)->bi_rw;			\
})
#define kc_bio_set_opf(bio, opf)	\
do {					\
	(bio)->bi_rw = opf;		\
} while (0)
#define kc_bio_set_sector(bio, sect)	\
do {					\
	(bio)->bi_sector = sect;	\
} while (0)
#define kc_submit_bio(bio)		\
do {					\
	submit_bio((bio)->bi_rw, bio);	\
} while (0)
#define bio_set_dev(bio, bdev)		\
do {					\
	(bio)->bi_bdev = (bdev);	\
} while (0)
#endif

#ifdef KC_BIO_BI_STATUS
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio)
#define kc_bio_get_errno(bio) ({ blk_status_to_errno((bio)->bi_status); })
#else
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio, int _error_arg)
#define kc_bio_get_errno(bio) ({ (int)((void)(bio), _error_arg); })
#endif

/*
 * v4.13-rc1-6-ge462ec50cb5f
 *
 * MS_* (mount) flags from <linux/mount.h> should not be used in the kernel
 * anymore from 4.x onwards. Instead, we need to use the SB_* (superblock) flags
 */
#ifndef SB_POSIXACL
#define SB_POSIXACL MS_POSIXACL
#define SB_I_VERSION MS_I_VERSION
#endif

#ifndef KC_CURRENT_TIME_INODE
struct timespec64 kc_current_time(struct inode *inode);
#define current_time kc_current_time
#define kc_timespec timespec
#else
#define kc_timespec timespec64
#endif

#ifndef KC_SHRINKER_SHRINK

#define KC_DEFINE_SHRINKER(name) struct shrinker name
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do {	\
	__typeof__(name) _shrink = (name);			\
	_shrink->count_objects = (countfn);			\
	_shrink->scan_objects = (scanfn);			\
	_shrink->seeks = DEFAULT_SEEKS;				\
} while (0)

#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(ptr, type, shrinker)
#ifdef KC_SHRINKER_NAME
#define KC_REGISTER_SHRINKER register_shrinker
#else
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr))
#endif /* KC_SHRINKER_NAME */
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr))
#define KC_SHRINKER_FN(ptr) (ptr)
#else

#include <linux/shrinker.h>
#ifndef SHRINK_STOP
#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
#endif

int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc);
struct kc_shrinker_wrapper {
	unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc);
	struct shrinker shrink;
};

#define KC_DEFINE_SHRINKER(name) struct kc_shrinker_wrapper name;
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do {	\
	struct kc_shrinker_wrapper *_wrap = (name);		\
	_wrap->count_objects = (countfn);			\
	_wrap->scan_objects = (scanfn);				\
	_wrap->shrink.shrink = kc_shrink_wrapper_fn;		\
	_wrap->shrink.seeks = DEFAULT_SEEKS;			\
} while (0)
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(container_of(ptr, struct kc_shrinker_wrapper, shrink), type, shrinker)
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr.shrink))
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr.shrink))
#define KC_SHRINKER_FN(ptr) (ptr.shrink)

#endif /* KC_SHRINKER_SHRINK */
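
A rough usage sketch, not from the source: countfn/scanfn use the
modern count/scan signatures and the macros above adapt them on old
.shrink() kernels. The exact argument shape KC_REGISTER_SHRINKER
expects differs between the two branches above, so treat this as
illustrative only:

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc);
static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc);

static KC_DEFINE_SHRINKER(demo_shrinker);

static int demo_shrinker_setup(void)
{
	KC_INIT_SHRINKER_FUNCS(&demo_shrinker, demo_count, demo_scan);
	return KC_REGISTER_SHRINKER(&demo_shrinker, "scoutfs-demo");
}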

#ifdef KC_KERNEL_GETSOCKNAME_ADDRLEN
#include <linux/net.h>
#include <linux/inet.h>
static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *addr)
static inline int dir_emit_dots(struct file *file, void *dirent,
				filldir_t filldir)
{
	int addrlen = sizeof(struct sockaddr_in);
	int ret = kernel_getsockname(sock, addr, &addrlen);
	if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
		return -EAFNOSUPPORT;
	else if (ret < 0)
		return ret;

	return sizeof(struct sockaddr_in);
}
static inline int kc_kernel_getpeername(struct socket *sock, struct sockaddr *addr)
{
	int addrlen = sizeof(struct sockaddr_in);
	int ret = kernel_getpeername(sock, addr, &addrlen);
	if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
		return -EAFNOSUPPORT;
	else if (ret < 0)
		return ret;

	return sizeof(struct sockaddr_in);
}
#else
#define kc_kernel_getsockname(sock, addr) kernel_getsockname(sock, addr)
#define kc_kernel_getpeername(sock, addr) kernel_getpeername(sock, addr)
#endif

#ifdef KC_SOCK_CREATE_KERN_NET
#define kc_sock_create_kern(family, type, proto, res) sock_create_kern(&init_net, family, type, proto, res)
#else
#define kc_sock_create_kern sock_create_kern
#endif

#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
ssize_t kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
				       unsigned long nr_segs, loff_t pos, loff_t *ppos,
				       size_t count, ssize_t written);
#define generic_file_buffered_write kc_generic_file_buffered_write
#ifdef KC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	iocb->ki_pos = pos;
	return generic_perform_write(iocb, iter);
}
#else
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	return generic_perform_write(file, iter, pos);
}
#endif
#endif // KC_GENERIC_FILE_BUFFERED_WRITE

#ifndef KC_HAVE_BLK_OPF_T
/* typedef __u32 __bitwise blk_opf_t; */
typedef unsigned int blk_opf_t;
#endif

#ifdef KC_LIST_CMP_CONST_ARG_LIST_HEAD
#define KC_LIST_CMP_CONST const
#else
#define KC_LIST_CMP_CONST
#endif

#ifdef KC_VMALLOC_PGPROT_T
#define kc__vmalloc(size, gfp_mask) __vmalloc(size, gfp_mask, PAGE_KERNEL)
#else
#define kc__vmalloc __vmalloc
#endif

#ifdef KC_VFS_METHOD_MNT_IDMAP_ARG
#define KC_VFS_NS_DEF struct mnt_idmap *mnt_idmap,
#define KC_VFS_NS mnt_idmap,
#define KC_VFS_INIT_NS &nop_mnt_idmap,
#else
#ifdef KC_VFS_METHOD_USER_NAMESPACE_ARG
#define KC_VFS_NS_DEF struct user_namespace *mnt_user_ns,
#define KC_VFS_NS mnt_user_ns,
#define KC_VFS_INIT_NS &init_user_ns,
#else
#define KC_VFS_NS_DEF
#define KC_VFS_NS
#define KC_VFS_INIT_NS
#endif
#endif /* KC_VFS_METHOD_MNT_IDMAP_ARG */

#ifdef KC_BIO_ALLOC_DEV_OPF_ARGS
#define kc_bio_alloc bio_alloc
#else
#include <linux/bio.h>
static inline struct bio *kc_bio_alloc(struct block_device *bdev, unsigned short nr_vecs,
				       blk_opf_t opf, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc(gfp_mask, nr_vecs);
	if (b) {
		kc_bio_set_opf(b, opf);
		bio_set_dev(b, bdev);
	if (file->f_pos == 0) {
		if (filldir(dirent, ".", 1, 1,
			    file->f_path.dentry->d_inode->i_ino, DT_DIR))
			return 0;
		file->f_pos = 1;
	}
	return b;
}
#endif

#ifndef KC_FIEMAP_PREP
#define fiemap_prep(inode, fieinfo, start, len, flags) fiemap_check_flags(fieinfo, flags)
#endif
	if (file->f_pos == 1) {
		if (filldir(dirent, "..", 2, 1,
			    parent_ino(file->f_path.dentry), DT_DIR))
			return 0;
		file->f_pos = 2;
	}

#ifndef KC_KERNEL_OLD_TIMEVAL_STRUCT
#define __kernel_old_timeval timeval
#define ns_to_kernel_old_timeval(ktime) ns_to_timeval(ktime.tv64)
#endif

#ifdef KC_SOCK_SET_SNDTIMEO
#include <net/sock.h>
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
{
	sock_set_sndtimeo(sock->sk, secs);
	return 0;
}
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
{
	struct __kernel_old_timeval tv;
	sockptr_t kopt;

	tv = ns_to_kernel_old_timeval(to);

	kopt = KERNEL_SOCKPTR(&tv);

	return sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO_NEW,
			       kopt, sizeof(tv));
}
#else
#include <net/sock.h>
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
{
	struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };
	return kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
				 (char *)&tv, sizeof(tv));
}
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
{
	struct __kernel_old_timeval tv;

	tv = ns_to_kernel_old_timeval(to);
	return kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				 (char *)&tv, sizeof(tv));
}
#endif

#ifdef KC_SETSOCKOPT_SOCKPTR_T
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
{
	sockptr_t kopt = KERNEL_SOCKPTR(optval);
	return sock_setsockopt(sock, level, op, kopt, sizeof(optval));
}
#else
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
{
	return kernel_setsockopt(sock, level, op, (char *)optval, sizeof(optval));
}
#endif

#ifdef KC_HAVE_TCP_SET_SOCKFN
#include <linux/net.h>
#include <net/tcp.h>
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
{
	return tcp_sock_set_keepintvl(sock->sk, val);
}
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
{
	return tcp_sock_set_keepidle(sock->sk, val);
}
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
{
	tcp_sock_set_user_timeout(sock->sk, val);
	return 0;
}
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
{
	tcp_sock_set_nodelay(sock->sk);
	return 0;
}
#else
#include <linux/net.h>
#include <net/tcp.h>
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
{
	int optval = 1;
	return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
}
#endif

#ifdef KC_INODE_DIO_END
#define kc_inode_dio_end inode_dio_end
#else
#define kc_inode_dio_end inode_dio_done
#endif

#ifndef KC_MM_VM_FAULT_T
typedef unsigned int vm_fault_t;
static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
#endif

#include <linux/list_lru.h>

#ifndef KC_LIST_LRU_SHRINK_COUNT_WALK
/* we don't bother with sc->{nid,memcg} (which doesn't exist in oldest kernels) */
static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count(lru);
}
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk(lru, isolate, cb_arg, sc->nr_to_scan);
}
#endif

#ifndef KC_LIST_LRU_ADD_OBJ
#define list_lru_add_obj list_lru_add
#define list_lru_del_obj list_lru_del
#endif

#if defined(KC_LIST_LRU_WALK_CB_LIST_LOCK) || defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
struct list_lru_one;
typedef enum lru_status (*kc_list_lru_walk_cb_t)(struct list_head *item, struct list_lru_one *list,
						 void *cb_arg);
struct kc_isolate_args {
	kc_list_lru_walk_cb_t isolate;
	void *cb_arg;
};
unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk);
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg);
#else
#define kc_list_lru_shrink_walk list_lru_shrink_walk
#endif

#if defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
/* isolate moved by hand, nr_items updated in walk as _REMOVE returned */
static inline void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
					 struct list_head *head)
{
	list_move(item, head);
}
#endif

#ifndef KC_STACK_TRACE_SAVE
|
||||
#include <linux/stacktrace.h>
|
||||
static inline unsigned int stack_trace_save(unsigned long *store, unsigned int size,
|
||||
unsigned int skipnr)
|
||||
{
|
||||
struct stack_trace trace = {
|
||||
.entries = store,
|
||||
.max_entries = size,
|
||||
.skip = skipnr,
|
||||
};
|
||||
|
||||
save_stack_trace(&trace);
|
||||
return trace.nr_entries;
|
||||
}
|
||||
|
||||
static inline void stack_trace_print(unsigned long *entries, unsigned int nr_entries, int spaces)
|
||||
{
|
||||
struct stack_trace trace = {
|
||||
.entries = entries,
|
||||
.nr_entries = nr_entries,
|
||||
};
|
||||
|
||||
print_stack_trace(&trace, spaces);
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -108,16 +108,6 @@ static inline void scoutfs_key_set_ones(struct scoutfs_key *key)
|
||||
memset(key->__pad, 0, sizeof(key->__pad));
|
||||
}
|
||||
|
||||
static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
|
||||
{
|
||||
return key->sk_zone == U8_MAX &&
|
||||
key->_sk_first == cpu_to_le64(U64_MAX) &&
|
||||
key->sk_type == U8_MAX &&
|
||||
key->_sk_second == cpu_to_le64(U64_MAX) &&
|
||||
key->_sk_third == cpu_to_le64(U64_MAX) &&
|
||||
key->_sk_fourth == U8_MAX;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return a -1/0/1 comparison of keys.
|
||||
*
|
||||
@@ -125,8 +115,8 @@ static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
|
||||
* other alternatives across keys that first differ in any of the
|
||||
* values. Say maybe 20% faster than memcmp.
|
||||
*/
|
||||
static inline int scoutfs_key_compare(const struct scoutfs_key *a,
|
||||
const struct scoutfs_key *b)
|
||||
static inline int scoutfs_key_compare(struct scoutfs_key *a,
|
||||
struct scoutfs_key *b)
|
||||
{
|
||||
return scoutfs_cmp(a->sk_zone, b->sk_zone) ?:
|
||||
scoutfs_cmp(le64_to_cpu(a->_sk_first), le64_to_cpu(b->_sk_first)) ?:
|
||||
@@ -142,10 +132,10 @@ static inline int scoutfs_key_compare(const struct scoutfs_key *a,
|
||||
* 1: a_start > b_end
|
||||
* else 0: ranges overlap
|
||||
*/
|
||||
static inline int scoutfs_key_compare_ranges(const struct scoutfs_key *a_start,
|
||||
const struct scoutfs_key *a_end,
|
||||
const struct scoutfs_key *b_start,
|
||||
const struct scoutfs_key *b_end)
|
||||
static inline int scoutfs_key_compare_ranges(struct scoutfs_key *a_start,
|
||||
struct scoutfs_key *a_end,
|
||||
struct scoutfs_key *b_start,
|
||||
struct scoutfs_key *b_end)
|
||||
{
|
||||
return scoutfs_key_compare(a_end, b_start) < 0 ? -1 :
|
||||
scoutfs_key_compare(a_start, b_end) > 0 ? 1 :
|
||||
|
||||
816
kmod/src/lock.c
816
kmod/src/lock.c
File diff suppressed because it is too large
Load Diff
@@ -6,15 +6,12 @@
|
||||
|
||||
#define SCOUTFS_LKF_REFRESH_INODE 0x01 /* update stale inode from item */
|
||||
#define SCOUTFS_LKF_NONBLOCK 0x02 /* only use already held locks */
|
||||
#define SCOUTFS_LKF_INTERRUPTIBLE 0x04 /* pending signals return -ERESTARTSYS */
|
||||
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_INTERRUPTIBLE << 1) - 1))
|
||||
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_NONBLOCK << 1) - 1))
|
||||
|
||||
#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID
|
||||
|
||||
struct inode_deletion_lock_data;
|
||||
|
||||
/*
|
||||
* A few fields (start, end, refresh_gen, write_seq, granted_mode)
|
||||
* A few fields (start, end, refresh_gen, write_version, granted_mode)
|
||||
* are referenced by code outside lock.c.
|
||||
*/
|
||||
struct scoutfs_lock {
|
||||
@@ -24,22 +21,26 @@ struct scoutfs_lock {
|
||||
struct rb_node node;
|
||||
struct rb_node range_node;
|
||||
u64 refresh_gen;
|
||||
u64 write_seq;
|
||||
u64 write_version;
|
||||
u64 dirty_trans_seq;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct list_head lru_head;
|
||||
wait_queue_head_t waitq;
|
||||
ktime_t grace_deadline;
|
||||
unsigned long request_pending:1,
|
||||
invalidate_pending:1;
|
||||
|
||||
struct list_head inv_head; /* entry in linfo's list of locks with invalidations */
|
||||
struct list_head inv_list; /* list of lock's invalidation requests */
|
||||
struct list_head grant_head;
|
||||
struct scoutfs_net_lock_grant_response grant_resp;
|
||||
struct list_head inv_head;
|
||||
struct scoutfs_net_lock inv_nl;
|
||||
u64 inv_net_id;
|
||||
struct list_head shrink_head;
|
||||
|
||||
spinlock_t cov_list_lock;
|
||||
struct list_head cov_list;
|
||||
|
||||
enum scoutfs_lock_mode mode;
|
||||
enum scoutfs_lock_mode invalidating_mode;
|
||||
unsigned int waiters[SCOUTFS_LOCK_NR_MODES];
|
||||
unsigned int users[SCOUTFS_LOCK_NR_MODES];
|
||||
|
||||
@@ -47,9 +48,6 @@ struct scoutfs_lock {
|
||||
|
||||
/* the forest tracks which log tree last saw bloom bit updates */
|
||||
atomic64_t forest_bloom_nr;
|
||||
|
||||
/* inode deletion tracks some state per lock */
|
||||
struct inode_deletion_lock_data *inode_deletion_data;
|
||||
};
|
||||
|
||||
struct scoutfs_lock_coverage {
|
||||
@@ -59,7 +57,7 @@ struct scoutfs_lock_coverage {
|
||||
};
|
||||
|
||||
int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
struct scoutfs_net_lock *nl);
|
||||
struct scoutfs_net_lock_grant_response *gr);
|
||||
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
@@ -82,14 +80,8 @@ int scoutfs_lock_inodes(struct super_block *sb, enum scoutfs_lock_mode mode, int
|
||||
struct inode *d, struct scoutfs_lock **D_lock);
|
||||
int scoutfs_lock_rename(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
u64 ino, struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_rid(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
u64 rid, struct scoutfs_lock **lock);
|
||||
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
@@ -104,13 +96,9 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
|
||||
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino);
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb);
|
||||
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr);
|
||||
|
||||
int scoutfs_lock_setup(struct super_block *sb);
|
||||
void scoutfs_lock_unmount_begin(struct super_block *sb);
|
||||
void scoutfs_lock_flush_invalidate(struct super_block *sb);
|
||||
void scoutfs_lock_shutdown(struct super_block *sb);
|
||||
void scoutfs_lock_destroy(struct super_block *sb);
|
||||
|
||||
|
||||
@@ -20,10 +20,10 @@
|
||||
#include "tseq.h"
|
||||
#include "spbm.h"
|
||||
#include "block.h"
|
||||
#include "btree.h"
|
||||
#include "msg.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "lock_server.h"
|
||||
#include "recov.h"
|
||||
|
||||
/*
|
||||
* The scoutfs server implements a simple lock service. Client mounts
|
||||
@@ -56,11 +56,14 @@
|
||||
* Message requests and responses are reliably delivered in order across
|
||||
* reconnection.
|
||||
*
|
||||
* As a new server comes up it recovers lock state from existing clients
|
||||
* which were connected to a previous lock server. Recover requests are
|
||||
* sent to clients as they connect and they respond with all there
|
||||
* locks. Once all clients and locks are accounted for normal
|
||||
* processing can resume.
|
||||
* The server maintains a persistent record of connected clients. A new
|
||||
* server instance discovers these and waits for previously connected
|
||||
* clients to reconnect and recover their state before proceeding. If
|
||||
* clients don't reconnect they are forcefully prevented from unsafely
|
||||
* accessing the shared persistent storage. (fenced, according to the
|
||||
* rules of the platform.. could range from being powered off to having
|
||||
* their switch port disabled to having their local block device set
|
||||
* read-only.)
|
||||
*
|
||||
* The lock server doesn't respond to memory pressure. The only way
|
||||
* locks are freed is if they are invalidated to null on behalf of a
|
||||
@@ -74,12 +77,19 @@ struct lock_server_info {
|
||||
struct super_block *sb;
|
||||
|
||||
spinlock_t lock;
|
||||
struct mutex mutex;
|
||||
struct rb_root locks_root;
|
||||
|
||||
struct scoutfs_spbm recovery_pending;
|
||||
struct delayed_work recovery_dwork;
|
||||
|
||||
struct scoutfs_tseq_tree tseq_tree;
|
||||
struct dentry *tseq_dentry;
|
||||
struct scoutfs_tseq_tree stats_tseq_tree;
|
||||
struct dentry *stats_tseq_dentry;
|
||||
|
||||
struct scoutfs_alloc *alloc;
|
||||
struct scoutfs_block_writer *wri;
|
||||
|
||||
atomic64_t write_version;
|
||||
};
|
||||
|
||||
#define DECLARE_LOCK_SERVER_INFO(sb, name) \
|
||||
@@ -106,9 +116,6 @@ struct server_lock_node {
|
||||
struct list_head granted;
|
||||
struct list_head requested;
|
||||
struct list_head invalidated;
|
||||
|
||||
struct scoutfs_tseq_entry stats_tseq_entry;
|
||||
u64 stats[SLT_NR];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -153,30 +160,30 @@ enum {
|
||||
*/
|
||||
static void add_client_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
struct client_lock_entry *c_ent)
|
||||
struct client_lock_entry *clent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (list_empty(&c_ent->head))
|
||||
list_add_tail(&c_ent->head, list);
|
||||
if (list_empty(&clent->head))
|
||||
list_add_tail(&clent->head, list);
|
||||
else
|
||||
list_move_tail(&c_ent->head, list);
|
||||
list_move_tail(&clent->head, list);
|
||||
|
||||
c_ent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
clent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
list == &snode->requested ? OL_REQUESTED :
|
||||
OL_INVALIDATED;
|
||||
}
|
||||
|
||||
static void free_client_entry(struct lock_server_info *inf,
|
||||
struct server_lock_node *snode,
|
||||
struct client_lock_entry *c_ent)
|
||||
struct client_lock_entry *clent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (!list_empty(&c_ent->head))
|
||||
list_del_init(&c_ent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
kfree(c_ent);
|
||||
if (!list_empty(&clent->head))
|
||||
list_del_init(&clent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &clent->tseq_entry);
|
||||
kfree(clent);
|
||||
}
|
||||
|
||||
static bool invalid_mode(u8 mode)
|
||||
@@ -202,48 +209,21 @@ static u8 invalidation_mode(u8 granted, u8 requested)
|
||||
|
||||
/*
|
||||
* Return true of the client lock instances described by the entries can
|
||||
* be granted at the same time. There's only three cases where this is
|
||||
* true.
|
||||
*
|
||||
* First, the two locks are both of the same mode that allows full
|
||||
* sharing -- read and write only. The only point of these modes is
|
||||
* that everyone can share them.
|
||||
*
|
||||
* Second, a write lock gives the client permission to read as well.
|
||||
* This means that a client can upgrade its read lock to a write lock
|
||||
* without having to invalidate the existing read and drop caches.
|
||||
*
|
||||
* Third, null locks are always compatible between clients. It's as
|
||||
* though the client with the null lock has no lock at all. But it's
|
||||
* never compatible with all locks on the client requesting null.
|
||||
* Sending invalidations for existing locks on a client when we get a
|
||||
* null request is how we resolve races in shrinking locks -- we turn it
|
||||
* into the unsolicited remote invalidation case.
|
||||
*
|
||||
* All other mode and client combinations can not be shared, most
|
||||
* typically a write lock invalidating all other non-write holders to
|
||||
* drop caches and force a read after the write has completed.
|
||||
* be granted at the same time. Typically this only means they're both
|
||||
* modes that are compatible between nodes. In addition there's the
|
||||
* special case where a read lock on a client is compatible with a write
|
||||
* lock on the same client because the client's cache covered by the
|
||||
* read lock is still valid if they get a write lock.
|
||||
*/
|
||||
static bool client_entries_compatible(struct client_lock_entry *granted,
|
||||
struct client_lock_entry *requested)
|
||||
{
|
||||
/* only read and write_only can be full shared */
|
||||
if ((granted->mode == requested->mode) &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ || granted->mode == SCOUTFS_LOCK_WRITE_ONLY))
|
||||
return true;
|
||||
|
||||
/* _write includes reading, so a client can upgrade its read to write */
|
||||
if (granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE)
|
||||
return true;
|
||||
|
||||
/* null is always compatible across clients, never within a client */
|
||||
if ((granted->rid != requested->rid) &&
|
||||
(granted->mode == SCOUTFS_LOCK_NULL || requested->mode == SCOUTFS_LOCK_NULL))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
return (granted->mode == requested->mode &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ ||
|
||||
granted->mode == SCOUTFS_LOCK_WRITE_ONLY)) ||
|
||||
(granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -325,8 +305,6 @@ static struct server_lock_node *alloc_server_lock(struct lock_server_info *inf,
|
||||
snode = get_server_lock(inf, key, ins, false);
|
||||
if (snode != ins)
|
||||
kfree(ins);
|
||||
else
|
||||
scoutfs_tseq_add(&inf->stats_tseq_tree, &snode->stats_tseq_entry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,37 +322,33 @@ static void put_server_lock(struct lock_server_info *inf,
|
||||
|
||||
BUG_ON(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
|
||||
if (atomic_dec_and_test(&snode->refcount) &&
|
||||
list_empty(&snode->granted) &&
|
||||
list_empty(&snode->requested) &&
|
||||
list_empty(&snode->invalidated)) {
|
||||
spin_lock(&inf->lock);
|
||||
rb_erase(&snode->node, &inf->locks_root);
|
||||
spin_unlock(&inf->lock);
|
||||
should_free = true;
|
||||
}
|
||||
|
||||
spin_unlock(&inf->lock);
|
||||
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
if (should_free) {
|
||||
scoutfs_tseq_del(&inf->stats_tseq_tree, &snode->stats_tseq_entry);
|
||||
if (should_free)
|
||||
kfree(snode);
|
||||
}
|
||||
}
|
||||
|
||||
static struct client_lock_entry *find_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
u64 rid)
|
||||
{
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *clent;
|
||||
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
list_for_each_entry(c_ent, list, head) {
|
||||
if (c_ent->rid == rid)
|
||||
return c_ent;
|
||||
list_for_each_entry(clent, list, head) {
|
||||
if (clent->rid == rid)
|
||||
return clent;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@@ -393,7 +367,7 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
u64 net_id, struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *clent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -405,29 +379,27 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
goto out;
|
||||
}
|
||||
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = net_id;
|
||||
c_ent->mode = nl->new_mode;
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = net_id;
|
||||
clent->mode = nl->new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nl->key);
|
||||
if (snode == NULL) {
|
||||
kfree(c_ent);
|
||||
kfree(clent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
snode->stats[SLT_REQUEST]++;
|
||||
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
out:
|
||||
@@ -446,7 +418,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *clent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -458,27 +430,25 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX should always have a server lock here? */
|
||||
/* XXX should always have a server lock here? recovery? */
|
||||
snode = get_server_lock(inf, &nl->key, NULL, false);
|
||||
if (!snode) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
snode->stats[SLT_RESPONSE]++;
|
||||
|
||||
c_ent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!c_ent) {
|
||||
clent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!clent) {
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nl->new_mode == SCOUTFS_LOCK_NULL) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
free_client_entry(inf, snode, clent);
|
||||
} else {
|
||||
c_ent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
clent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
}
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
@@ -503,40 +473,31 @@ out:
|
||||
* so we unlock the snode mutex.
|
||||
*
|
||||
* All progress must wait for all clients to finish with recovery
|
||||
* because we don't know which locks they'll hold. Once recover
|
||||
* finishes the server calls us to kick all the locks that were waiting
|
||||
* during recovery.
|
||||
*
|
||||
* The calling server shuts down if we return errors indicating that we
|
||||
* weren't able to ensure forward progress in the lock state machine.
|
||||
*
|
||||
* Failure to send to a disconnected client is not a fatal error.
|
||||
* During normal disconnection the client's state is removed before
|
||||
* their connection is destroyed. We can't use state to try and send to
|
||||
* a non-existing connection. But a client that fails to reconnect is
|
||||
* disconnected before being fenced. If we have multiple disconnected
|
||||
* clients we can try to send to one while cleaning up another. If
|
||||
* they've uncleanly disconnected their locks are going to be removed
|
||||
* and the lock can make forward progress again. Or we'll shutdown for
|
||||
* failure to fence.
|
||||
* because we don't know which locks they'll hold. The unlocked
|
||||
* recovery_pending test here is OK. It's filled by setup before
|
||||
* anything runs. It's emptied by recovery completion. We can get a
|
||||
* false nonempty result if we race with recovery completion, but that's
|
||||
* OK because recovery completion processes all the locks that have
|
||||
* requests after emptying, including the unlikely loser of that race.
|
||||
*/
|
||||
static int process_waiting_requests(struct super_block *sb,
|
||||
struct server_lock_node *snode)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_net_lock_grant_response gres;
|
||||
struct scoutfs_net_lock nl;
|
||||
struct client_lock_entry *req;
|
||||
struct client_lock_entry *req_tmp;
|
||||
struct client_lock_entry *gr;
|
||||
struct client_lock_entry *gr_tmp;
|
||||
u64 seq;
|
||||
u64 wv;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
/* processing waits for all invalidation responses or recovery */
|
||||
if (!list_empty(&snode->invalidated) ||
|
||||
scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_LOCKS) != 0) {
|
||||
!scoutfs_spbm_empty(&inf->recovery_pending)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
@@ -560,7 +521,6 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
trace_scoutfs_lock_message(sb, SLT_SERVER,
|
||||
SLT_INVALIDATE, SLT_REQUEST,
|
||||
gr->rid, 0, &nl);
|
||||
snode->stats[SLT_INVALIDATE]++;
|
||||
|
||||
add_client_entry(snode, &snode->invalidated, gr);
|
||||
}
|
||||
@@ -571,7 +531,6 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
|
||||
nl.key = snode->key;
|
||||
nl.new_mode = req->mode;
|
||||
nl.write_seq = 0;
|
||||
|
||||
/* see if there's an existing compatible grant to replace */
|
||||
gr = find_entry(snode, &snode->granted, req->rid);
|
||||
@@ -584,20 +543,21 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
|
||||
if (nl.new_mode == SCOUTFS_LOCK_WRITE ||
|
||||
nl.new_mode == SCOUTFS_LOCK_WRITE_ONLY) {
|
||||
/* doesn't commit seq update, recovered with locks */
|
||||
seq = scoutfs_server_next_seq(sb);
|
||||
nl.write_seq = cpu_to_le64(seq);
|
||||
wv = atomic64_inc_return(&inf->write_version);
|
||||
nl.write_version = cpu_to_le64(wv);
|
||||
}
|
||||
|
||||
gres.nl = nl;
|
||||
scoutfs_server_get_roots(sb, &gres.roots);
|
||||
|
||||
ret = scoutfs_server_lock_response(sb, req->rid,
|
||||
req->net_id, &nl);
|
||||
req->net_id, &gres);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
trace_scoutfs_lock_message(sb, SLT_SERVER, SLT_GRANT,
|
||||
SLT_RESPONSE, req->rid,
|
||||
req->net_id, &nl);
|
||||
snode->stats[SLT_GRANT]++;
|
||||
|
||||
/* don't track null client locks, track all else */
|
||||
if (req->mode == SCOUTFS_LOCK_NULL)
|
||||
@@ -610,46 +570,92 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
out:
|
||||
put_server_lock(inf, snode);
|
||||
|
||||
/* disconnected clients will be fenced, trying to send to them isn't fatal */
|
||||
if (ret == -ENOTCONN)
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void init_lock_clients_key(struct scoutfs_key *key, u64 rid)
|
||||
{
|
||||
*key = (struct scoutfs_key) {
|
||||
.sk_zone = SCOUTFS_LOCK_CLIENTS_ZONE,
|
||||
.sklc_rid = cpu_to_le64(rid),
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The server received a greeting from a client for the first time. If
|
||||
* the client is in lock recovery then we send the initial lock request.
|
||||
* the client had already talked to the server then we must find an
|
||||
* existing record for it and should begin recovery. If it doesn't have
|
||||
* a record then its timed out and we can't allow it to reconnect. If
|
||||
* we're creating a new record for a client we can see EEXIST if the
|
||||
* greeting is resent to a new server after the record was committed but
|
||||
* before the response was received by the client.
|
||||
*
|
||||
* This is running in concurrent client greeting processing contexts.
|
||||
*/
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid)
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
|
||||
bool should_exist)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
if (scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
|
||||
init_lock_clients_key(&key, rid);
|
||||
|
||||
mutex_lock(&inf->mutex);
|
||||
if (should_exist) {
|
||||
ret = scoutfs_btree_lookup(sb, &super->lock_clients, &key,
|
||||
&iref);
|
||||
if (ret == 0)
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
} else {
|
||||
ret = scoutfs_btree_insert(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients,
|
||||
&key, NULL, 0);
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&inf->mutex);
|
||||
|
||||
if (should_exist && ret == 0) {
|
||||
scoutfs_key_set_zeros(&key);
|
||||
ret = scoutfs_server_lock_recover_request(sb, rid, &key);
|
||||
} else {
|
||||
ret = 0;
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* All clients have finished lock recovery, we can make forward process
|
||||
* on all the queued requests that were waiting on recovery.
|
||||
* A client sent their last recovery response and can exit recovery. If
|
||||
* they were the last client in recovery then we can process all the
|
||||
* server locks that had requests.
|
||||
*/
|
||||
int scoutfs_lock_server_finished_recovery(struct super_block *sb)
|
||||
static int finished_recovery(struct super_block *sb, u64 rid, bool cancel)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
bool still_pending;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
scoutfs_spbm_clear(&inf->recovery_pending, rid);
|
||||
still_pending = !scoutfs_spbm_empty(&inf->recovery_pending);
|
||||
spin_unlock(&inf->lock);
|
||||
if (still_pending)
|
||||
return 0;
|
||||
|
||||
if (cancel)
|
||||
cancel_delayed_work_sync(&inf->recovery_dwork);
|
||||
|
||||
scoutfs_key_set_zeros(&key);
|
||||
|
||||
scoutfs_info(sb, "all lock clients recovered");
|
||||
|
||||
while ((snode = get_server_lock(inf, &key, NULL, true))) {
|
||||
|
||||
key = snode->key;
|
||||
@@ -667,6 +673,14 @@ int scoutfs_lock_server_finished_recovery(struct super_block *sb)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void set_max_write_version(struct lock_server_info *inf, u64 new)
|
||||
{
|
||||
u64 old;
|
||||
|
||||
while (new > (old = atomic64_read(&inf->write_version)) &&
|
||||
(atomic64_cmpxchg(&inf->write_version, old, new) != old));
|
||||
}
|
||||
|
||||
/*
|
||||
* We sent a lock recover request to the client when we received its
|
||||
* greeting while in recovery. Here we instantiate all the locks it
|
||||
@@ -678,61 +692,62 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *existing;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *clent;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
/* client must be in recovery */
|
||||
if (!scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
|
||||
spin_lock(&inf->lock);
|
||||
if (!scoutfs_spbm_test(&inf->recovery_pending, rid))
|
||||
ret = -EINVAL;
|
||||
spin_unlock(&inf->lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* client has sent us all their locks */
|
||||
if (nlr->nr == 0) {
|
||||
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_LOCKS);
|
||||
ret = 0;
|
||||
ret = finished_recovery(sb, rid, true);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < le16_to_cpu(nlr->nr); i++) {
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = 0;
|
||||
c_ent->mode = nlr->locks[i].new_mode;
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = 0;
|
||||
clent->mode = nlr->locks[i].new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nlr->locks[i].key);
|
||||
if (snode == NULL) {
|
||||
kfree(c_ent);
|
||||
kfree(clent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
existing = find_entry(snode, &snode->granted, rid);
|
||||
if (existing) {
|
||||
kfree(c_ent);
|
||||
kfree(clent);
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EEXIST;
|
||||
goto out;
|
||||
}
|
||||
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
|
||||
put_server_lock(inf, snode);
|
||||
|
||||
/* make sure next core seq is greater than all lock write seq */
|
||||
scoutfs_server_set_seq_if_greater(sb,
|
||||
le64_to_cpu(nlr->locks[i].write_seq));
|
||||
/* make sure next write lock is greater than all recovered */
|
||||
set_max_write_version(inf,
|
||||
le64_to_cpu(nlr->locks[i].write_version));
|
||||
}
|
||||
|
||||
/* send request for next batch of keys */
|
||||
@@ -744,16 +759,102 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_rid_and_put_ref(struct scoutfs_btree_item_ref *iref, u64 *rid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (iref->val_len == 0) {
|
||||
*rid = le64_to_cpu(iref->key->sklc_rid);
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
scoutfs_btree_put_iref(iref);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work executes if enough time passes without all of the clients
|
||||
* finishing with recovery and canceling the work. We walk through the
|
||||
* client records and find any that still have their recovery pending.
|
||||
*/
|
||||
static void scoutfs_lock_server_recovery_timeout(struct work_struct *work)
|
||||
{
|
||||
struct lock_server_info *inf = container_of(work,
|
||||
struct lock_server_info,
|
||||
recovery_dwork.work);
|
||||
struct super_block *sb = inf->sb;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
bool timed_out;
|
||||
u64 rid;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_server_hold_commit(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* we enter recovery if there are any client records */
|
||||
for (rid = 0; ; rid++) {
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
if (ret == 0)
|
||||
ret = get_rid_and_put_ref(&iref, &rid);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
if (scoutfs_spbm_test(&inf->recovery_pending, rid)) {
|
||||
scoutfs_spbm_clear(&inf->recovery_pending, rid);
|
||||
timed_out = true;
|
||||
} else {
|
||||
timed_out = false;
|
||||
}
|
||||
spin_unlock(&inf->lock);
|
||||
|
||||
if (!timed_out)
|
||||
continue;
|
||||
|
||||
scoutfs_err(sb, "client rid %016llx lock recovery timed out",
|
||||
rid);
|
||||
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients, &key);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
out:
|
||||
/* force processing all pending lock requests */
|
||||
if (ret == 0)
|
||||
ret = finished_recovery(sb, 0, false);
|
||||
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "lock server saw err %d while timing out clients, shutting down", ret);
|
||||
scoutfs_server_abort(sb);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A client is leaving the lock service. They aren't using locks and
|
||||
* won't send any more requests. We tear down all the state we had for
|
||||
* them. This can be called multiple times for a given client as their
|
||||
* farewell is resent to new servers. It's OK to not find any state.
|
||||
* If we fail to delete a persistent entry then we have to shut down and
|
||||
* hope that the next server has more luck.
|
||||
*/
|
||||
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *c_ent;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *tmp;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
@@ -761,7 +862,20 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
bool freed;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&inf->mutex);
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients, &key);
|
||||
mutex_unlock(&inf->mutex);
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
scoutfs_key_set_zeros(&key);
|
||||
|
||||
while ((snode = get_server_lock(inf, &key, NULL, true))) {
|
||||
|
||||
freed = false;
|
||||
@@ -770,9 +884,9 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
(list == &snode->requested) ? &snode->invalidated :
|
||||
NULL) {
|
||||
|
||||
list_for_each_entry_safe(c_ent, tmp, list, head) {
|
||||
if (c_ent->rid == rid) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
list_for_each_entry_safe(clent, tmp, list, head) {
|
||||
if (clent->rid == rid) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
freed = true;
|
||||
}
|
||||
}
|
||||
@@ -795,7 +909,7 @@ out:
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "lock server err %d during client rid %016llx farewell, shutting down",
|
||||
ret, rid);
|
||||
scoutfs_server_stop(sb);
|
||||
scoutfs_server_abort(sb);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -833,35 +947,36 @@ static char *lock_on_list_string(u8 on_list)
|
||||
static void lock_server_tseq_show(struct seq_file *m,
|
||||
struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
struct client_lock_entry *c_ent = container_of(ent,
|
||||
struct client_lock_entry *clent = container_of(ent,
|
||||
struct client_lock_entry,
|
||||
tseq_entry);
|
||||
struct server_lock_node *snode = c_ent->snode;
|
||||
struct server_lock_node *snode = clent->snode;
|
||||
|
||||
seq_printf(m, SK_FMT" %s %s rid %016llx net_id %llu\n",
|
||||
SK_ARG(&snode->key), lock_mode_string(c_ent->mode),
|
||||
lock_on_list_string(c_ent->on_list), c_ent->rid,
|
||||
c_ent->net_id);
|
||||
}
|
||||
|
||||
static void stats_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
struct server_lock_node *snode = container_of(ent, struct server_lock_node,
|
||||
stats_tseq_entry);
|
||||
|
||||
seq_printf(m, SK_FMT" req %llu inv %llu rsp %llu gr %llu\n",
|
||||
SK_ARG(&snode->key), snode->stats[SLT_REQUEST], snode->stats[SLT_INVALIDATE],
|
||||
snode->stats[SLT_RESPONSE], snode->stats[SLT_GRANT]);
|
||||
SK_ARG(&snode->key), lock_mode_string(clent->mode),
|
||||
lock_on_list_string(clent->on_list), clent->rid,
|
||||
clent->net_id);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup the lock server. This is called before networking can deliver
|
||||
* requests.
|
||||
* requests. If we find existing client records then we enter recovery.
|
||||
* Lock request processing is deferred until recovery is resolved for
|
||||
* all the existing clients, either they reconnect and replay locks or
|
||||
* we time them out.
|
||||
*/
|
||||
int scoutfs_lock_server_setup(struct super_block *sb)
|
||||
int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, u64 max_vers)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct lock_server_info *inf;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
unsigned int nr;
|
||||
u64 rid;
|
||||
int ret;
|
||||
|
||||
inf = kzalloc(sizeof(struct lock_server_info), GFP_KERNEL);
|
||||
if (!inf)
|
||||
@@ -869,9 +984,15 @@ int scoutfs_lock_server_setup(struct super_block *sb)
|
||||
|
||||
inf->sb = sb;
|
||||
spin_lock_init(&inf->lock);
|
||||
mutex_init(&inf->mutex);
|
||||
inf->locks_root = RB_ROOT;
|
||||
scoutfs_spbm_init(&inf->recovery_pending);
|
||||
INIT_DELAYED_WORK(&inf->recovery_dwork,
|
||||
scoutfs_lock_server_recovery_timeout);
|
||||
scoutfs_tseq_tree_init(&inf->tseq_tree, lock_server_tseq_show);
|
||||
scoutfs_tseq_tree_init(&inf->stats_tseq_tree, stats_tseq_show);
|
||||
inf->alloc = alloc;
|
||||
inf->wri = wri;
|
||||
atomic64_set(&inf->write_version, max_vers); /* inc_return gives +1 */
|
||||
|
||||
inf->tseq_dentry = scoutfs_tseq_create("server_locks", sbi->debug_root,
|
||||
&inf->tseq_tree);
|
||||
@@ -880,17 +1001,38 @@ int scoutfs_lock_server_setup(struct super_block *sb)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
inf->stats_tseq_dentry = scoutfs_tseq_create("server_lock_stats", sbi->debug_root,
|
||||
&inf->stats_tseq_tree);
|
||||
if (!inf->stats_tseq_dentry) {
|
||||
debugfs_remove(inf->tseq_dentry);
|
||||
kfree(inf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sbi->lock_server_info = inf;
|
||||
|
||||
return 0;
|
||||
/* we enter recovery if there are any client records */
|
||||
nr = 0;
|
||||
for (rid = 0; ; rid++) {
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
|
||||
if (ret == -ENOENT)
|
||||
break;
|
||||
if (ret == 0)
|
||||
ret = get_rid_and_put_ref(&iref, &rid);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_spbm_set(&inf->recovery_pending, rid);
|
||||
if (ret)
|
||||
goto out;
|
||||
nr++;
|
||||
|
||||
if (rid == U64_MAX)
|
||||
break;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
if (nr) {
|
||||
schedule_delayed_work(&inf->recovery_dwork,
|
||||
msecs_to_jiffies(LOCK_SERVER_RECOVERY_MS));
|
||||
scoutfs_info(sb, "waiting for %u lock clients to recover", nr);
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -903,13 +1045,14 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct server_lock_node *snode;
|
||||
struct server_lock_node *stmp;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *ctmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
if (inf) {
|
||||
cancel_delayed_work_sync(&inf->recovery_dwork);
|
||||
|
||||
debugfs_remove(inf->tseq_dentry);
|
||||
debugfs_remove(inf->stats_tseq_dentry);
|
||||
|
||||
rbtree_postorder_for_each_entry_safe(snode, stmp,
|
||||
&inf->locks_root, node) {
|
||||
@@ -919,14 +1062,16 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
list_splice_init(&snode->invalidated, &list);
|
||||
|
||||
mutex_lock(&snode->mutex);
|
||||
list_for_each_entry_safe(c_ent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
list_for_each_entry_safe(clent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
}
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
kfree(snode);
|
||||
}
|
||||
|
||||
scoutfs_spbm_destroy(&inf->recovery_pending);
|
||||
|
||||
kfree(inf);
|
||||
sbi->lock_server_info = NULL;
|
||||
}
|
||||
|
||||
@@ -3,15 +3,17 @@
|
||||
|
||||
int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock_recover *nlr);
|
||||
int scoutfs_lock_server_finished_recovery(struct super_block *sb);
|
||||
int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
u64 net_id, struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid);
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
|
||||
bool should_exist);
|
||||
int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid);
|
||||
|
||||
int scoutfs_lock_server_setup(struct super_block *sb);
|
||||
int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, u64 max_vers);
|
||||
void scoutfs_lock_server_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
#include <linux/bitops.h>
|
||||
#include "key.h"
|
||||
#include "counters.h"
|
||||
#include "super.h"
|
||||
|
||||
void __printf(4, 5) scoutfs_msg(struct super_block *sb, const char *prefix,
|
||||
const char *str, const char *fmt, ...);
|
||||
@@ -24,9 +23,6 @@ do { \
|
||||
#define scoutfs_info(sb, fmt, args...) \
|
||||
scoutfs_msg_check(sb, KERN_INFO, "", fmt, ##args)
|
||||
|
||||
#define scoutfs_tprintk(sb, fmt, args...) \
|
||||
trace_printk(SCSBF " " fmt "\n", SCSB_ARGS(sb), ##args);
|
||||
|
||||
#define scoutfs_bug_on(sb, cond, fmt, args...) \
|
||||
do { \
|
||||
if (cond) { \
|
||||
@@ -35,12 +31,6 @@ do { \
|
||||
} \
|
||||
} while (0) \
|
||||
|
||||
#define scoutfs_bug_on_err(sb, err, fmt, args...) \
|
||||
do { \
|
||||
__typeof__(err) _err = (err); \
|
||||
scoutfs_bug_on(sb, _err < 0 && _err != -ENOLINK, fmt, ##args); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Each message is only generated once per volume. Remounting resets
|
||||
* the messages.
|
||||
|
||||
880
kmod/src/net.c
880
kmod/src/net.c
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,10 @@
|
||||
#ifndef _SCOUTFS_NET_H_
|
||||
#define _SCOUTFS_NET_H_
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/in.h>
|
||||
#include "endian_swap.h"
|
||||
#include "tseq.h"
|
||||
|
||||
struct scoutfs_work_list {
|
||||
struct work_struct work;
|
||||
spinlock_t lock;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct scoutfs_net_connection;
|
||||
|
||||
/* These are called in their own blocking context */
|
||||
@@ -57,7 +49,6 @@ struct scoutfs_net_connection {
|
||||
u64 greeting_id;
|
||||
struct sockaddr_in sockname;
|
||||
struct sockaddr_in peername;
|
||||
struct sockaddr_in last_peername;
|
||||
|
||||
struct list_head accepted_head;
|
||||
struct scoutfs_net_connection *listening_conn;
|
||||
@@ -67,12 +58,8 @@ struct scoutfs_net_connection {
|
||||
u64 next_send_id;
|
||||
struct list_head send_queue;
|
||||
struct list_head resend_queue;
|
||||
struct rb_root req_root;
|
||||
struct rb_root resp_root;
|
||||
|
||||
atomic64_t recv_seq;
|
||||
unsigned int ordered_proc_nr;
|
||||
struct scoutfs_work_list *ordered_proc_wlists;
|
||||
|
||||
struct workqueue_struct *workq;
|
||||
struct work_struct listen_work;
|
||||
@@ -112,16 +99,6 @@ static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
|
||||
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
|
||||
}
|
||||
|
||||
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_in *sin)
|
||||
{
|
||||
BUG_ON(sin->sin_family != AF_INET);
|
||||
|
||||
memset(addr, 0, sizeof(union scoutfs_inet_addr));
|
||||
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
addr->v4.addr = be32_to_le32(sin->sin_addr.s_addr);
|
||||
addr->v4.port = be16_to_le16(sin->sin_port);
|
||||
}
|
||||
|
||||
struct scoutfs_net_connection *
|
||||
scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
scoutfs_net_notify_t notify_up,
|
||||
@@ -146,6 +123,9 @@ int scoutfs_net_submit_request_node(struct super_block *sb,
|
||||
u64 rid, u8 cmd, void *arg, u16 arg_len,
|
||||
scoutfs_net_response_t resp_func,
|
||||
void *resp_data, u64 *id_ret);
|
||||
void scoutfs_net_cancel_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id);
|
||||
int scoutfs_net_sync_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, void *arg, unsigned arg_len,
|
||||
|
||||
889
kmod/src/omap.c
889
kmod/src/omap.c
@@ -1,889 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
#include "cmp.h"
|
||||
#include "inode.h"
|
||||
#include "client.h"
|
||||
#include "server.h"
|
||||
#include "omap.h"
|
||||
#include "recov.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
/*
|
||||
* As a client removes an inode from its cache with an nlink of 0 it
|
||||
* needs to decide if it is the last client using the inode and should
|
||||
* fully delete all the inode's items. It needs to know if other mounts
|
||||
* still have the inode in use.
|
||||
*
|
||||
* We need a way to communicate between mounts that an inode is in use.
|
||||
* We don't want to pay the synchronous per-file locking round trip
|
||||
* costs associated with per-inode open locks that you'd typically see
|
||||
* in systems to solve this problem. The first prototypes of this
|
||||
* tracked open file handles so this was coined the open map, though it
|
||||
* now tracks cached inodes.
|
||||
*
|
||||
* Clients maintain bitmaps that cover groups of inodes. As inodes
|
||||
* enter the cache their bit is set and as the inode is evicted the bit
|
||||
* is cleared. As deletion is attempted, either by scanning orphans or
|
||||
* evicting an inode with an nlink of 0, messages are sent around the
|
||||
* cluster to get the current bitmaps for that inode's group from all
|
||||
* active mounts. If the inode's bit is clear then it can be deleted.
|
||||
*
|
||||
* This layer maintains a list of client rids to send messages to. The
|
||||
* server calls us as clients enter and leave the cluster. We can't
|
||||
* process requests until all clients are present as a server starts up
|
||||
* so we hook into recovery and delay processing until all previously
|
||||
* existing clients are recovered or fenced.
|
||||
*/
|
||||
|
||||
struct omap_rid_list {
|
||||
int nr_rids;
|
||||
struct list_head head;
|
||||
};
|
||||
|
||||
struct omap_rid_entry {
|
||||
struct list_head head;
|
||||
u64 rid;
|
||||
};
|
||||
|
||||
struct omap_info {
|
||||
/* client */
|
||||
struct rhashtable group_ht;
|
||||
|
||||
/* server */
|
||||
struct rhashtable req_ht;
|
||||
struct llist_head requests;
|
||||
spinlock_t lock;
|
||||
struct omap_rid_list rids;
|
||||
atomic64_t next_req_id;
|
||||
};
|
||||
|
||||
#define DECLARE_OMAP_INFO(sb, name) \
|
||||
struct omap_info *name = SCOUTFS_SB(sb)->omap_info
|
||||
|
||||
/*
|
||||
* The presence of an inode in the inode sets its bit in the lock
|
||||
* group's bitmap.
|
||||
*
|
||||
* We don't want to add additional global synchronization of inode cache
|
||||
* maintenance so these are tracked in an rcu hash table. Once their
|
||||
* total reaches zero they're removed from the hash and queued for
|
||||
* freeing and readers should ignore them.
|
||||
*/
|
||||
struct omap_group {
|
||||
struct super_block *sb;
|
||||
struct rhash_head ht_head;
|
||||
struct rcu_head rcu;
|
||||
u64 nr;
|
||||
spinlock_t lock;
|
||||
unsigned int total;
|
||||
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
|
||||
};
|
||||
|
||||
#define trace_group(sb, which, group, bit_nr) \
|
||||
do { \
|
||||
__typeof__(group) _grp = (group); \
|
||||
__typeof__(bit_nr) _nr = (bit_nr); \
|
||||
\
|
||||
trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Each request is initialized with the rids of currently mounted
|
||||
* clients. As each responds we remove their rid and send the response
|
||||
* once everyone has contributed.
|
||||
*
|
||||
* The request frequency will typically be low, but in a mass rm -rf
|
||||
* load we will see O(groups * clients) messages flying around.
|
||||
*/
|
||||
struct omap_request {
|
||||
struct llist_node llnode;
|
||||
struct rhash_head ht_head;
|
||||
struct rcu_head rcu;
|
||||
spinlock_t lock;
|
||||
u64 client_rid;
|
||||
u64 client_id;
|
||||
struct omap_rid_list rids;
|
||||
struct scoutfs_open_ino_map map;
|
||||
};
|
||||
|
||||
static inline void init_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
INIT_LIST_HEAD(&list->head);
|
||||
list->nr_rids = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Negative searches almost never happen.
|
||||
*/
|
||||
static struct omap_rid_entry *find_rid(struct omap_rid_list *list, u64 rid)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
|
||||
list_for_each_entry(entry, &list->head, head) {
|
||||
if (rid == entry->rid)
|
||||
return entry;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int free_rid(struct omap_rid_list *list, struct omap_rid_entry *entry)
|
||||
{
|
||||
int nr;
|
||||
|
||||
list_del(&entry->head);
|
||||
nr = --list->nr_rids;
|
||||
|
||||
kfree(entry);
|
||||
return nr;
|
||||
}
|
||||
|
||||
static void free_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &list->head, head)
|
||||
free_rid(list, entry);
|
||||
}
|
||||
|
||||
static int copy_rids(struct omap_rid_list *to, struct omap_rid_list *from, spinlock_t *from_lock)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *src;
|
||||
struct omap_rid_entry *dst;
|
||||
int nr;
|
||||
|
||||
spin_lock(from_lock);
|
||||
|
||||
while (to->nr_rids != from->nr_rids) {
|
||||
nr = from->nr_rids;
|
||||
spin_unlock(from_lock);
|
||||
|
||||
while (to->nr_rids < nr) {
|
||||
entry = kmalloc(sizeof(struct omap_rid_entry), GFP_NOFS);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
list_add_tail(&entry->head, &to->head);
|
||||
to->nr_rids++;
|
||||
}
|
||||
|
||||
while (to->nr_rids > nr) {
|
||||
entry = list_first_entry(&to->head, struct omap_rid_entry, head);
|
||||
list_del(&entry->head);
|
||||
kfree(entry);
|
||||
to->nr_rids--;
|
||||
}
|
||||
|
||||
spin_lock(from_lock);
|
||||
}
|
||||
|
||||
dst = list_first_entry(&to->head, struct omap_rid_entry, head);
|
||||
list_for_each_entry(src, &from->head, head) {
|
||||
dst->rid = src->rid;
|
||||
dst = list_next_entry(dst, head);
|
||||
}
|
||||
|
||||
spin_unlock(from_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_rids(struct omap_rid_list *list)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &list->head, head) {
|
||||
list_del(&entry->head);
|
||||
kfree(entry);
|
||||
}
|
||||
}
|
||||
|
||||
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
|
||||
{
|
||||
*group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
|
||||
*bit_nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;
|
||||
}
|
||||
|
||||
static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
|
||||
{
|
||||
struct omap_group *group;
|
||||
|
||||
group = kzalloc(sizeof(struct omap_group), GFP_NOFS);
|
||||
if (group) {
|
||||
group->sb = sb;
|
||||
group->nr = group_nr;
|
||||
spin_lock_init(&group->lock);
|
||||
|
||||
trace_group(sb, alloc, group, -1);
|
||||
}
|
||||
|
||||
return group;
|
||||
}
|
||||
|
||||
static void free_group(struct super_block *sb, struct omap_group *group)
|
||||
{
|
||||
trace_group(sb, free, group, -1);
|
||||
kfree(group);
|
||||
}
|
||||
|
||||
static void free_group_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct omap_group *group = container_of(rcu, struct omap_group, rcu);
|
||||
|
||||
free_group(group->sb, group);
|
||||
}
|
||||
|
||||
static const struct rhashtable_params group_ht_params = {
|
||||
.key_len = member_sizeof(struct omap_group, nr),
|
||||
.key_offset = offsetof(struct omap_group, nr),
|
||||
.head_offset = offsetof(struct omap_group, ht_head),
|
||||
};
|
||||
|
||||
/*
|
||||
* Track an cached inode in its group. Our set can be racing with a
|
||||
* final clear that removes the group from the hash, sets total to
|
||||
* UINT_MAX, and calls rcu free. We can retry until the dead group is
|
||||
* no longer visible in the hash table and we can insert a new allocated
|
||||
* group.
|
||||
*
|
||||
* The caller must ensure that the bit is clear, -EEXIST will be
|
||||
* returned otherwise.
|
||||
*/
|
||||
int scoutfs_omap_set(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
bool found;
|
||||
int ret = 0;
|
||||
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
retry:
|
||||
found = false;
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
if (group->total < UINT_MAX) {
|
||||
found = true;
|
||||
if (WARN_ON_ONCE(test_and_set_bit_le(bit_nr, group->bits)))
|
||||
ret = -EEXIST;
|
||||
else
|
||||
group->total++;
|
||||
}
|
||||
trace_group(sb, inc, group, bit_nr);
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!found) {
|
||||
group = alloc_group(sb, group_nr);
|
||||
if (group) {
|
||||
ret = rhashtable_lookup_insert_fast(&ominf->group_ht, &group->ht_head,
|
||||
group_ht_params);
|
||||
if (ret < 0)
|
||||
free_group(sb, group);
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
if (ret == -EBUSY) {
|
||||
/* wait for rehash to finish */
|
||||
synchronize_rcu();
|
||||
ret = 0;
|
||||
}
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
} else {
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool scoutfs_omap_test(struct super_block *sb, u64 ino)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct omap_group *group;
	bool ret = false;
	u64 group_nr;
	int bit_nr;

	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);

	rcu_read_lock();
	group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
	if (group) {
		spin_lock(&group->lock);
		ret = !!test_bit_le(bit_nr, group->bits);
		spin_unlock(&group->lock);
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clear a previously set ino bit.  Trying to clear a bit that's already
 * clear implies imbalanced set/clear or bugs freeing groups.  We only
 * free groups here as the last clear drops the group's total to 0.
 */
void scoutfs_omap_clear(struct super_block *sb, u64 ino)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct omap_group *group;
	u64 group_nr;
	int bit_nr;

	scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);

	rcu_read_lock();
	group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
	if (group) {
		spin_lock(&group->lock);
		WARN_ON_ONCE(!test_bit_le(bit_nr, group->bits));
		WARN_ON_ONCE(group->total == 0);
		WARN_ON_ONCE(group->total == UINT_MAX);
		if (test_and_clear_bit_le(bit_nr, group->bits)) {
			if (--group->total == 0) {
				group->total = UINT_MAX;
				rhashtable_remove_fast(&ominf->group_ht, &group->ht_head,
						       group_ht_params);
				call_rcu(&group->rcu, free_group_rcu);
			}
		}
		trace_group(sb, dec, group, bit_nr);
		spin_unlock(&group->lock);
	}
	rcu_read_unlock();

	WARN_ON_ONCE(!group);
}

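Setting total to UINT_MAX here is the usual RCU tombstone trick: a reader can still win a lookup race and find the group after it's been removed from the hash, so the dead value, checked under the group lock, tells it to act as if the lookup had failed. A generic sketch of the pattern (not scoutfs code):

	/* writer, under the object lock */
	obj->dead = true;			/* tombstone */
	rhashtable_remove_fast(&ht, &obj->head, params);
	call_rcu(&obj->rcu, free_cb);

	/* reader */
	rcu_read_lock();
	obj = rhashtable_lookup(&ht, &key, params);
	if (obj) {
		spin_lock(&obj->lock);
		if (!obj->dead)
			use(obj);		/* safe until unlock */
		spin_unlock(&obj->lock);
	}
	rcu_read_unlock();
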
/*
 * The server adds rids as it discovers clients.  We add them to the
 * list of rids to send map requests to.
 */
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct omap_rid_entry *entry;
	struct omap_rid_entry *found;

	entry = kmalloc(sizeof(struct omap_rid_entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	spin_lock(&ominf->lock);
	found = find_rid(&ominf->rids, rid);
	if (!found) {
		entry->rid = rid;
		list_add_tail(&entry->head, &ominf->rids.head);
		ominf->rids.nr_rids++;
	}
	spin_unlock(&ominf->lock);

	if (found)
		kfree(entry);

	return 0;
}

static void free_req(struct omap_request *req)
{
	free_rids(&req->rids);
	kfree(req);
}

static void free_req_rcu(struct rcu_head *rcu)
{
	struct omap_request *req = container_of(rcu, struct omap_request, rcu);

	free_req(req);
}

static const struct rhashtable_params req_ht_params = {
	.key_len = member_sizeof(struct omap_request, map.args.req_id),
	.key_offset = offsetof(struct omap_request, map.args.req_id),
	.head_offset = offsetof(struct omap_request, ht_head),
};

/*
 * Remove a rid from all the pending requests.  If it's the last rid we
 * give the caller the details to send a response, and they'll call back
 * to keep removing.  If their send fails they're going to shut down the
 * server, so we can queue freeing the request as we give it to them.
 */
static int remove_rid_from_reqs(struct omap_info *ominf, u64 rid, u64 *resp_rid, u64 *resp_id,
				struct scoutfs_open_ino_map *map)
{
	struct omap_rid_entry *entry;
	struct rhashtable_iter iter;
	struct omap_request *req;
	int ret = 0;

	rhashtable_walk_enter(&ominf->req_ht, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		req = rhashtable_walk_next(&iter);
		if (req == NULL)
			break;
		if (req == ERR_PTR(-EAGAIN))
			continue;

		spin_lock(&req->lock);
		entry = find_rid(&req->rids, rid);
		if (entry && free_rid(&req->rids, entry) == 0) {
			*resp_rid = req->client_rid;
			*resp_id = req->client_id;
			memcpy(map, &req->map, sizeof(struct scoutfs_open_ino_map));
			rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
			call_rcu(&req->rcu, free_req_rcu);
			ret = 1;
		}
		spin_unlock(&req->lock);
		if (ret > 0)
			break;
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	if (ret <= 0) {
		*resp_rid = 0;
		*resp_id = 0;
	}

	return ret;
}

/*
 * A client has been evicted.  Remove its rid from the list and walk
 * through all the pending requests and remove its rids, sending the
 * response if it was the last rid waiting for a response.
 *
 * If this returns an error then the server will shut down.
 *
 * This can be called multiple times by different servers if there are
 * errors reclaiming an evicted mount, so we allow asking to remove a
 * rid that hasn't been added.
 */
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct scoutfs_open_ino_map *map = NULL;
	struct omap_rid_entry *entry;
	u64 resp_rid = 0;
	u64 resp_id = 0;
	int ret;

	spin_lock(&ominf->lock);
	entry = find_rid(&ominf->rids, rid);
	if (entry)
		free_rid(&ominf->rids, entry);
	spin_unlock(&ominf->lock);

	if (!entry) {
		ret = 0;
		goto out;
	}

	map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}

	/* remove the rid from all pending requests, sending responses if it was final */
	for (;;) {
		ret = remove_rid_from_reqs(ominf, rid, &resp_rid, &resp_id, map);
		if (ret <= 0)
			break;
		ret = scoutfs_server_send_omap_response(sb, resp_rid, resp_id, map, 0);
		if (ret < 0)
			break;
	}

out:
	kfree(map);
	return ret;
}

/*
 * Handle a single incoming request in the server.  This could have been
 * delayed by recovery.  This only returns an error if we couldn't send
 * a processing error response to the client.
 */
static int handle_request(struct super_block *sb, struct omap_request *req)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct omap_rid_list priv_rids;
	struct omap_rid_entry *entry;
	int ret;

	init_rid_list(&priv_rids);

	ret = copy_rids(&priv_rids, &ominf->rids, &ominf->lock);
	if (ret < 0)
		goto out;

	/* don't send a request to the client who originated this request */
	entry = find_rid(&priv_rids, req->client_rid);
	if (entry && free_rid(&priv_rids, entry) == 0) {
		ret = scoutfs_server_send_omap_response(sb, req->client_rid, req->client_id,
							&req->map, 0);
		kfree(req);
		req = NULL;
		goto out;
	}

	/* this lock isn't needed but sparse gave warnings with conditional locking */
	ret = copy_rids(&req->rids, &priv_rids, &ominf->lock);
	if (ret < 0)
		goto out;

	do {
		ret = rhashtable_insert_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
		if (ret == -EBUSY)
			synchronize_rcu(); /* wait for rehash to finish */
	} while (ret == -EBUSY);

	if (ret < 0)
		goto out;

	/*
	 * We can start getting responses the moment we send the first request.
	 * After we send the last request the req can be freed.
	 */
	while ((entry = list_first_entry_or_null(&priv_rids.head, struct omap_rid_entry, head))) {
		ret = scoutfs_server_send_omap_request(sb, entry->rid, &req->map.args);
		if (ret < 0) {
			rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
			goto out;
		}

		free_rid(&priv_rids, entry);
	}

	ret = 0;
out:
	free_rids(&priv_rids);
	if ((ret < 0) && (req != NULL)) {
		ret = scoutfs_server_send_omap_response(sb, req->client_rid, req->client_id,
							NULL, ret);
		free_req(req);
	}

	return ret;
}

/*
 * Handle all previously received omap requests from clients.  Once
 * we've finished recovery and can send requests to all clients we can
 * handle all pending requests.  The handling function frees the request
 * and only returns an error if it couldn't send a response to the
 * client.
 */
static int handle_requests(struct super_block *sb)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct llist_node *requests;
	struct omap_request *req;
	struct omap_request *tmp;
	int ret;
	int err;

	if (scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_GREETING))
		return 0;

	ret = 0;
	requests = llist_del_all(&ominf->requests);

	llist_for_each_entry_safe(req, tmp, requests, llnode) {
		err = handle_request(sb, req);
		if (err < 0 && ret == 0)
			ret = err;
	}

	return ret;
}

int scoutfs_omap_finished_recovery(struct super_block *sb)
{
	return handle_requests(sb);
}

/*
 * The server is receiving a request from a client for the bitmap of all
 * open inodes around their ino.  Queue it for processing, which is
 * typically immediate and inline but which can be deferred by recovery
 * as the server first starts up.
 */
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
				       struct scoutfs_open_ino_map_args *args)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct omap_request *req;

	req = kzalloc(sizeof(struct omap_request), GFP_NOFS);
	if (req == NULL)
		return -ENOMEM;

	spin_lock_init(&req->lock);
	req->client_rid = rid;
	req->client_id = id;
	init_rid_list(&req->rids);
	req->map.args.group_nr = args->group_nr;
	req->map.args.req_id = cpu_to_le64(atomic64_inc_return(&ominf->next_req_id));

	llist_add(&req->llnode, &ominf->requests);

	return handle_requests(sb);
}

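Putting the handlers together, the round trip looks like this (a sketch of the flow through the functions above and below, not new protocol):

	/*
	 * client A:  sends an open ino map request for a group
	 * server:    scoutfs_omap_server_handle_request() queues it,
	 *            handle_request() fans requests out to every other rid
	 * client N:  scoutfs_omap_client_handle_request() replies with its
	 *            local bitmap for the group
	 * server:    scoutfs_omap_server_handle_response() ORs each reply
	 *            into req->map; the last reply triggers the merged
	 *            response back to client A
	 */
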
/*
 * The client is receiving a request from the server for its map for the
 * given group.  Look up the group and copy the bits to the map.
 *
 * The mount originating the request for this bitmap has the inode group
 * write locked.  We can't be adding links to any inodes in the group
 * because that requires the lock.  Inode bits can be set and cleared
 * while we're sampling the bitmap.  These races are fine, they can't be
 * adding cached inodes if nlink is 0 and we don't have the lock.  If
 * the caller is removing a set bit then they're about to try to delete
 * the inode themselves and will first have to acquire the cluster lock
 * themselves.
 */
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
				       struct scoutfs_open_ino_map_args *args)
{
	DECLARE_OMAP_INFO(sb, ominf);
	u64 group_nr = le64_to_cpu(args->group_nr);
	struct scoutfs_open_ino_map *map;
	struct omap_group *group;
	bool copied = false;
	int ret;

	map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->args = *args;

	rcu_read_lock();
	group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
	if (group) {
		spin_lock(&group->lock);
		trace_group(sb, request, group, -1);
		if (group->total > 0 && group->total < UINT_MAX) {
			memcpy(map->bits, group->bits, sizeof(map->bits));
			copied = true;
		}
		spin_unlock(&group->lock);
	}
	rcu_read_unlock();

	if (!copied)
		memset(map->bits, 0, sizeof(map->bits));

	ret = scoutfs_client_send_omap_response(sb, id, map);
	kfree(map);
	return ret;
}

/*
 * The server has received an open ino map response from a client.  Find
 * the original request that it's serving, OR the response's map into
 * the request's, and send a reply if this was the last response from a
 * client we were waiting for.
 *
 * We can get responses for requests we're no longer tracking if, for
 * example, sending to a client gets an error.  We'll have already sent
 * the response to the requesting client so we drop these responses on
 * the floor.
 */
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
					struct scoutfs_open_ino_map *resp_map)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct scoutfs_open_ino_map *map;
	struct omap_rid_entry *entry;
	bool send_response = false;
	struct omap_request *req;
	u64 resp_rid;
	u64 resp_id;
	int ret;

	map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	req = rhashtable_lookup(&ominf->req_ht, &resp_map->args.req_id, req_ht_params);
	if (req) {
		spin_lock(&req->lock);
		entry = find_rid(&req->rids, rid);
		if (entry) {
			bitmap_or((unsigned long *)req->map.bits, (unsigned long *)req->map.bits,
				  (unsigned long *)resp_map->bits, SCOUTFS_OPEN_INO_MAP_BITS);
			if (free_rid(&req->rids, entry) == 0)
				send_response = true;
		}
		spin_unlock(&req->lock);

		if (send_response) {
			resp_rid = req->client_rid;
			resp_id = req->client_id;
			memcpy(map, &req->map, sizeof(struct scoutfs_open_ino_map));
			rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
			call_rcu(&req->rcu, free_req_rcu);
		}
	}
	rcu_read_unlock();

	if (send_response)
		ret = scoutfs_server_send_omap_response(sb, resp_rid, resp_id, map, 0);
	else
		ret = 0;
	kfree(map);
out:
	return ret;
}

/*
 * The server is shutting down.  Free all the server state associated
 * with ongoing request processing.  Clients who still have requests
 * pending will resend them to the next server.
 */
void scoutfs_omap_server_shutdown(struct super_block *sb)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct rhashtable_iter iter;
	struct llist_node *requests;
	struct omap_request *req;
	struct omap_request *tmp;

	rhashtable_walk_enter(&ominf->req_ht, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		req = rhashtable_walk_next(&iter);
		if (req == NULL)
			break;
		if (req == ERR_PTR(-EAGAIN))
			continue;

		if (req->rids.nr_rids != 0) {
			free_rids(&req->rids);
			rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
			call_rcu(&req->rcu, free_req_rcu);
		}
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	requests = llist_del_all(&ominf->requests);
	llist_for_each_entry_safe(req, tmp, requests, llnode)
		kfree(req);

	spin_lock(&ominf->lock);
	free_rid_list(&ominf->rids);
	spin_unlock(&ominf->lock);

	synchronize_rcu();
}

int scoutfs_omap_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct omap_info *ominf;
	int ret;

	ominf = kzalloc(sizeof(struct omap_info), GFP_KERNEL);
	if (!ominf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rhashtable_init(&ominf->group_ht, &group_ht_params);
	if (ret < 0) {
		kfree(ominf);
		goto out;
	}

	ret = rhashtable_init(&ominf->req_ht, &req_ht_params);
	if (ret < 0) {
		rhashtable_destroy(&ominf->group_ht);
		kfree(ominf);
		goto out;
	}

	init_llist_head(&ominf->requests);
	spin_lock_init(&ominf->lock);
	init_rid_list(&ominf->rids);
	atomic64_set(&ominf->next_req_id, 0);

	sbi->omap_info = ominf;
	ret = 0;
out:
	return ret;
}

/*
 * To get here the server must have shut down, freeing requests, and
 * evict must have been called on all cached inodes, so we can just
 * synchronize all the pending group frees.
 */
void scoutfs_omap_destroy(struct super_block *sb)
{
	DECLARE_OMAP_INFO(sb, ominf);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct rhashtable_iter iter;

	if (ominf) {
		synchronize_rcu();

		/* double check that all the groups decremented to 0 and were freed */
		rhashtable_walk_enter(&ominf->group_ht, &iter);
		rhashtable_walk_start(&iter);
		WARN_ON_ONCE(rhashtable_walk_peek(&iter) != NULL);
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);

		spin_lock(&ominf->lock);
		free_rid_list(&ominf->rids);
		spin_unlock(&ominf->lock);

		rhashtable_destroy(&ominf->group_ht);
		rhashtable_destroy(&ominf->req_ht);
		kfree(ominf);
		sbi->omap_info = NULL;
	}
}
@@ -1,23 +0,0 @@
#ifndef _SCOUTFS_OMAP_H_
#define _SCOUTFS_OMAP_H_

int scoutfs_omap_set(struct super_block *sb, u64 ino);
bool scoutfs_omap_test(struct super_block *sb, u64 ino);
void scoutfs_omap_clear(struct super_block *sb, u64 ino);
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
				       struct scoutfs_open_ino_map_args *args);
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr);

int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);
int scoutfs_omap_finished_recovery(struct super_block *sb);
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
				       struct scoutfs_open_ino_map_args *args);
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
					struct scoutfs_open_ino_map *resp_map);
void scoutfs_omap_server_shutdown(struct super_block *sb);

int scoutfs_omap_setup(struct super_block *sb);
void scoutfs_omap_destroy(struct super_block *sb);

#endif
@@ -26,49 +26,22 @@
#include "msg.h"
#include "options.h"
#include "super.h"
#include "inode.h"
#include "alloc.h"

enum {
	Opt_acl,
	Opt_data_prealloc_blocks,
	Opt_data_prealloc_contig_only,
	Opt_ino_alloc_per_lock,
	Opt_lock_idle_count,
	Opt_log_merge_wait_timeout_ms,
	Opt_metadev_path,
	Opt_noacl,
	Opt_orphan_scan_delay_ms,
	Opt_quorum_heartbeat_timeout_ms,
	Opt_quorum_slot_nr,
	Opt_tcp_keepalive_timeout_ms,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_acl, "acl"},
	{Opt_data_prealloc_blocks, "data_prealloc_blocks=%s"},
	{Opt_data_prealloc_contig_only, "data_prealloc_contig_only=%s"},
	{Opt_ino_alloc_per_lock, "ino_alloc_per_lock=%s"},
	{Opt_lock_idle_count, "lock_idle_count=%s"},
	{Opt_log_merge_wait_timeout_ms, "log_merge_wait_timeout_ms=%s"},
	{Opt_metadev_path, "metadev_path=%s"},
	{Opt_noacl, "noacl"},
	{Opt_orphan_scan_delay_ms, "orphan_scan_delay_ms=%s"},
	{Opt_quorum_heartbeat_timeout_ms, "quorum_heartbeat_timeout_ms=%s"},
	{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
	{Opt_tcp_keepalive_timeout_ms, "tcp_keepalive_timeout_ms=%s"},
	{Opt_err, NULL}
};

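These token strings are matched one option at a time with match_token() from <linux/parser.h>; a minimal sketch of what a single option goes through (illustrative value):

	char opt[] = "quorum_slot_nr=2";
	substring_t args[MAX_OPT_ARGS];
	int token;

	token = match_token(opt, tokens, args);
	/* token == Opt_quorum_slot_nr, args[0] spans the "2" */
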
struct options_info {
	seqlock_t seqlock;
	struct scoutfs_mount_options opts;
	struct scoutfs_sysfs_attrs sysfs_attrs;
};

struct options_sb_info {
	struct dentry *debugfs_dir;
};

#define DECLARE_OPTIONS_INFO(sb, name) \
	struct options_info *name = SCOUTFS_SB(sb)->options_info

u32 scoutfs_option_u32(struct super_block *sb, int token)
{
	WARN_ON_ONCE(1);
	return 0;
}

static int parse_bdev_path(struct super_block *sb, substring_t *substr,
			   char **bdev_path_ret)
@@ -116,252 +89,58 @@ out:
	return ret;
}

static void free_options(struct scoutfs_mount_options *opts)
{
	kfree(opts->metadev_path);
}

#define MIN_LOCK_IDLE_COUNT		32
#define DEFAULT_LOCK_IDLE_COUNT		(10 * 1000)
#define MAX_LOCK_IDLE_COUNT		(100 * 1000)

#define MIN_LOG_MERGE_WAIT_TIMEOUT_MS	100UL
#define DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS 500
#define MAX_LOG_MERGE_WAIT_TIMEOUT_MS	(60 * MSEC_PER_SEC)

#define MIN_ORPHAN_SCAN_DELAY_MS	100UL
#define DEFAULT_ORPHAN_SCAN_DELAY_MS	(10 * MSEC_PER_SEC)
#define MAX_ORPHAN_SCAN_DELAY_MS	(60 * MSEC_PER_SEC)

#define MIN_DATA_PREALLOC_BLOCKS	1ULL
#define MAX_DATA_PREALLOC_BLOCKS	((unsigned long long)SCOUTFS_BLOCK_SM_MAX)

#define DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS (60 * MSEC_PER_SEC)

static void init_default_options(struct scoutfs_mount_options *opts)
{
	memset(opts, 0, sizeof(*opts));

	opts->data_prealloc_blocks = SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS;
	opts->data_prealloc_contig_only = 1;
	opts->ino_alloc_per_lock = SCOUTFS_LOCK_INODE_GROUP_NR;
	opts->lock_idle_count = DEFAULT_LOCK_IDLE_COUNT;
	opts->log_merge_wait_timeout_ms = DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS;
	opts->orphan_scan_delay_ms = -1;
	opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
	opts->quorum_slot_nr = -1;
	opts->tcp_keepalive_timeout_ms = DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS;
}

static int verify_lock_idle_count(struct super_block *sb, int ret, int val)
{
	if (ret < 0) {
		scoutfs_err(sb, "failed to parse lock_idle_count value");
		return -EINVAL;
	}
	if (val < MIN_LOCK_IDLE_COUNT || val > MAX_LOCK_IDLE_COUNT) {
		scoutfs_err(sb, "invalid lock_idle_count value %d, must be between %u and %u",
			    val, MIN_LOCK_IDLE_COUNT, MAX_LOCK_IDLE_COUNT);
		return -EINVAL;
	}

	return 0;
}

static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)
{
	if (ret < 0) {
		scoutfs_err(sb, "failed to parse log_merge_wait_timeout_ms value");
		return -EINVAL;
	}
	if (val < MIN_LOG_MERGE_WAIT_TIMEOUT_MS || val > MAX_LOG_MERGE_WAIT_TIMEOUT_MS) {
		scoutfs_err(sb, "invalid log_merge_wait_timeout_ms value %d, must be between %lu and %lu",
			    val, MIN_LOG_MERGE_WAIT_TIMEOUT_MS, MAX_LOG_MERGE_WAIT_TIMEOUT_MS);
		return -EINVAL;
	}

	return 0;
}

static int verify_quorum_heartbeat_timeout_ms(struct super_block *sb, int ret, u64 val)
{
	if (ret < 0) {
		scoutfs_err(sb, "failed to parse quorum_heartbeat_timeout_ms value");
		return -EINVAL;
	}
	if (val < SCOUTFS_QUORUM_MIN_HB_TIMEO_MS || val > SCOUTFS_QUORUM_MAX_HB_TIMEO_MS) {
		scoutfs_err(sb, "invalid quorum_heartbeat_timeout_ms value %llu, must be between %lu and %lu",
			    val, SCOUTFS_QUORUM_MIN_HB_TIMEO_MS, SCOUTFS_QUORUM_MAX_HB_TIMEO_MS);
		return -EINVAL;
	}

	return 0;
}

static int verify_tcp_keepalive_timeout_ms(struct super_block *sb, int ret, int val)
{
	if (ret < 0) {
		scoutfs_err(sb, "failed to parse tcp_keepalive_timeout_ms value");
		return -EINVAL;
	}
	if (val <= (UNRESPONSIVE_PROBES * MSEC_PER_SEC)) {
		scoutfs_err(sb, "invalid tcp_keepalive_timeout_ms value %d, must be larger than %lu",
			    val, (UNRESPONSIVE_PROBES * MSEC_PER_SEC));
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse the option string into our options struct.  This can allocate
 * memory in the struct.  The caller is responsible for always calling
 * free_options() when the struct is destroyed, including when we return
 * an error.
 */
static int parse_options(struct super_block *sb, char *options, struct scoutfs_mount_options *opts)
int scoutfs_parse_options(struct super_block *sb, char *options,
			  struct mount_options *parsed)
{
	substring_t args[MAX_OPT_ARGS];
	u64 nr64;
	int nr;
	int token;
	char *p;
	int ret;

	/* Set defaults */
	memset(parsed, 0, sizeof(*parsed));
	parsed->quorum_slot_nr = -1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {

		case Opt_acl:
			sb->s_flags |= SB_POSIXACL;
			break;

		case Opt_data_prealloc_blocks:
			ret = match_u64(args, &nr64);
			if (ret < 0 ||
			    nr64 < MIN_DATA_PREALLOC_BLOCKS || nr64 > MAX_DATA_PREALLOC_BLOCKS) {
				scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
					    MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
				if (ret == 0)
					ret = -EINVAL;
				return ret;
			}
			opts->data_prealloc_blocks = nr64;
			break;

		case Opt_data_prealloc_contig_only:
			ret = match_int(args, &nr);
			if (ret < 0 || nr < 0 || nr > 1) {
				scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must only be 0 or 1");
				if (ret == 0)
					ret = -EINVAL;
				return ret;
			}
			opts->data_prealloc_contig_only = nr;
			break;

		case Opt_ino_alloc_per_lock:
			ret = match_int(args, &nr);
			if (ret < 0 || nr < 1 || nr > SCOUTFS_LOCK_INODE_GROUP_NR) {
				scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
					    SCOUTFS_LOCK_INODE_GROUP_NR);
				if (ret == 0)
					ret = -EINVAL;
				return ret;
			}
			opts->ino_alloc_per_lock = nr;
			break;

		case Opt_tcp_keepalive_timeout_ms:
			ret = match_int(args, &nr);
			ret = verify_tcp_keepalive_timeout_ms(sb, ret, nr);
			if (ret < 0)
				return ret;
			opts->tcp_keepalive_timeout_ms = nr;
			break;

		case Opt_lock_idle_count:
			ret = match_int(args, &nr);
			ret = verify_lock_idle_count(sb, ret, nr);
			if (ret < 0)
				return ret;
			opts->lock_idle_count = nr;
			break;

		case Opt_log_merge_wait_timeout_ms:
			ret = match_int(args, &nr);
			ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
			if (ret < 0)
				return ret;
			opts->log_merge_wait_timeout_ms = nr;
			break;

		case Opt_metadev_path:
			ret = parse_bdev_path(sb, &args[0], &opts->metadev_path);
			if (ret < 0)
				return ret;
			break;

		case Opt_noacl:
			sb->s_flags &= ~SB_POSIXACL;
			break;

		case Opt_orphan_scan_delay_ms:
			if (opts->orphan_scan_delay_ms != -1) {
				scoutfs_err(sb, "multiple orphan_scan_delay_ms options provided, only provide one.");
				return -EINVAL;
			}

			ret = match_int(args, &nr);
			if (ret < 0 ||
			    nr < MIN_ORPHAN_SCAN_DELAY_MS || nr > MAX_ORPHAN_SCAN_DELAY_MS) {
				scoutfs_err(sb, "invalid orphan_scan_delay_ms option, must be between %lu and %lu",
					    MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
				if (ret == 0)
					ret = -EINVAL;
				return ret;
			}
			opts->orphan_scan_delay_ms = nr;
			break;

		case Opt_quorum_heartbeat_timeout_ms:
			ret = match_u64(args, &nr64);
			ret = verify_quorum_heartbeat_timeout_ms(sb, ret, nr64);
			if (ret < 0)
				return ret;
			opts->quorum_heartbeat_timeout_ms = nr64;
			break;

		case Opt_quorum_slot_nr:
			if (opts->quorum_slot_nr != -1) {
			if (parsed->quorum_slot_nr != -1) {
				scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
				return -EINVAL;
			}

			ret = match_int(args, &nr);
			if (ret < 0 || nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
			if (ret < 0 || nr < 0 ||
			    nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
				scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
					    SCOUTFS_QUORUM_MAX_SLOTS - 1);
				if (ret == 0)
					ret = -EINVAL;
				return ret;
			}
			opts->quorum_slot_nr = nr;
			parsed->quorum_slot_nr = nr;
			break;
		case Opt_metadev_path:

			ret = parse_bdev_path(sb, &args[0],
					      &parsed->metadev_path);
			if (ret < 0)
				return ret;
			break;
		default:
			scoutfs_err(sb, "Unknown or malformed option, \"%s\"", p);
			return -EINVAL;
			scoutfs_err(sb, "Unknown or malformed option, \"%s\"",
				    p);
			break;
		}
	}

	if (opts->orphan_scan_delay_ms == -1)
		opts->orphan_scan_delay_ms = DEFAULT_ORPHAN_SCAN_DELAY_MS;

	if (!opts->metadev_path) {
	if (!parsed->metadev_path) {
		scoutfs_err(sb, "Required mount option \"metadev_path\" not found");
		return -EINVAL;
	}
@@ -369,423 +148,40 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
	return 0;
}

void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts)
{
	DECLARE_OPTIONS_INFO(sb, optinf);
	unsigned int seq;

	if (WARN_ON_ONCE(optinf == NULL)) {
		/* trying to use options before early setup or after destroy */
		init_default_options(opts);
		return;
	}

	do {
		seq = read_seqbegin(&optinf->seqlock);
		memcpy(opts, &optinf->opts, sizeof(struct scoutfs_mount_options));
	} while (read_seqretry(&optinf->seqlock, seq));
}

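The read side retries until it sees a snapshot that no writer raced; it pairs with the write_seqlock() updates in the sysfs store handlers below. The generic shape of the pairing:

	/* writer */
	write_seqlock(&sl);
	shared = new_value;
	write_sequnlock(&sl);

	/* reader keeps retrying until the snapshot is stable */
	do {
		seq = read_seqbegin(&sl);
		snapshot = shared;
	} while (read_seqretry(&sl, seq));
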
/*
 * Early setup that parses and stores the options so that the rest of
 * setup can use them.  Full options setup that relies on other
 * components will be done later.
 */
int scoutfs_options_early_setup(struct super_block *sb, char *options)
int scoutfs_options_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct scoutfs_mount_options opts;
	struct options_info *optinf;
	struct options_sb_info *osi;
	int ret;

	init_default_options(&opts);
	osi = kzalloc(sizeof(struct options_sb_info), GFP_KERNEL);
	if (!osi)
		return -ENOMEM;

	ret = parse_options(sb, options, &opts);
	if (ret < 0)
		goto out;
	sbi->options = osi;

	optinf = kzalloc(sizeof(struct options_info), GFP_KERNEL);
	if (!optinf) {
		osi->debugfs_dir = debugfs_create_dir("options", sbi->debug_root);
		if (!osi->debugfs_dir) {
			ret = -ENOMEM;
			goto out;
		}

	seqlock_init(&optinf->seqlock);
	scoutfs_sysfs_init_attrs(sb, &optinf->sysfs_attrs);

	write_seqlock(&optinf->seqlock);
	optinf->opts = opts;
	write_sequnlock(&optinf->seqlock);

	sbi->options_info = optinf;
	ret = 0;
out:
	if (ret < 0)
		free_options(&opts);

	return ret;
}

int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct scoutfs_mount_options opts;
	const bool is_acl = !!(sb->s_flags & SB_POSIXACL);

	scoutfs_options_read(sb, &opts);

	if (is_acl)
		seq_puts(seq, ",acl");
	seq_printf(seq, ",data_prealloc_blocks=%llu", opts.data_prealloc_blocks);
	seq_printf(seq, ",data_prealloc_contig_only=%u", opts.data_prealloc_contig_only);
	seq_printf(seq, ",ino_alloc_per_lock=%u", opts.ino_alloc_per_lock);
	seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
	if (!is_acl)
		seq_puts(seq, ",noacl");
	seq_printf(seq, ",orphan_scan_delay_ms=%u", opts.orphan_scan_delay_ms);
	if (opts.quorum_slot_nr >= 0)
		seq_printf(seq, ",quorum_slot_nr=%d", opts.quorum_slot_nr);
	seq_printf(seq, ",tcp_keepalive_timeout_ms=%d", opts.tcp_keepalive_timeout_ms);

	return 0;
}

static ssize_t data_prealloc_blocks_show(struct kobject *kobj, struct kobj_attribute *attr,
					 char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%llu", opts.data_prealloc_blocks);
}
static ssize_t data_prealloc_blocks_store(struct kobject *kobj, struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[30]; /* more than enough for octal -U64_MAX */
	u64 val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtoll(nullterm, 0, &val);
	if (ret < 0 || val < MIN_DATA_PREALLOC_BLOCKS || val > MAX_DATA_PREALLOC_BLOCKS) {
		scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
			    MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
		return -EINVAL;
	}

	write_seqlock(&optinf->seqlock);
	optinf->opts.data_prealloc_blocks = val;
	write_sequnlock(&optinf->seqlock);

	return count;
}
SCOUTFS_ATTR_RW(data_prealloc_blocks);

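Every store handler below repeats the same guard: copy at most sizeof() - 1 bytes into a local buffer, terminate it, then parse with a kstrto*() helper using base 0 so decimal, 0x-prefixed hex, and leading-0 octal all work. Condensed, the shared shape is:

	char nullterm[30];
	int len = min(count, sizeof(nullterm) - 1);

	memcpy(nullterm, buf, len);	/* bound and NUL-terminate the input */
	nullterm[len] = '\0';
	ret = kstrtoint(nullterm, 0, &val);
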
static ssize_t data_prealloc_contig_only_show(struct kobject *kobj, struct kobj_attribute *attr,
					      char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%u", opts.data_prealloc_contig_only);
}
static ssize_t data_prealloc_contig_only_store(struct kobject *kobj, struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[20]; /* more than enough for octal -U32_MAX */
	long val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtol(nullterm, 0, &val);
	if (ret < 0 || val < 0 || val > 1) {
		scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must be 0 or 1");
		return -EINVAL;
	}

	write_seqlock(&optinf->seqlock);
	optinf->opts.data_prealloc_contig_only = val;
	write_sequnlock(&optinf->seqlock);

	return count;
}
SCOUTFS_ATTR_RW(data_prealloc_contig_only);

static ssize_t ino_alloc_per_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
				       char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%u", opts.ino_alloc_per_lock);
}
static ssize_t ino_alloc_per_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[20]; /* more than enough for octal -U32_MAX */
	long val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtol(nullterm, 0, &val);
	if (ret < 0 || val < 1 || val > SCOUTFS_LOCK_INODE_GROUP_NR) {
		scoutfs_err(sb, "invalid ino_alloc_per_lock option, must be between 1 and %u",
			    SCOUTFS_LOCK_INODE_GROUP_NR);
		return -EINVAL;
	}

	write_seqlock(&optinf->seqlock);
	optinf->opts.ino_alloc_per_lock = val;
	write_sequnlock(&optinf->seqlock);

	return count;
}
SCOUTFS_ATTR_RW(ino_alloc_per_lock);

static ssize_t lock_idle_count_show(struct kobject *kobj, struct kobj_attribute *attr,
				    char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%u", opts.lock_idle_count);
}
static ssize_t lock_idle_count_store(struct kobject *kobj, struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[30]; /* more than enough for octal -U64_MAX */
	int val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtoint(nullterm, 0, &val);
	ret = verify_lock_idle_count(sb, ret, val);
	if (ret == 0) {
		write_seqlock(&optinf->seqlock);
		optinf->opts.lock_idle_count = val;
		write_sequnlock(&optinf->seqlock);
		ret = count;
	}

	return ret;
}
SCOUTFS_ATTR_RW(lock_idle_count);

static ssize_t log_merge_wait_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
					      char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%u", opts.log_merge_wait_timeout_ms);
}
static ssize_t log_merge_wait_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[30]; /* more than enough for octal -U64_MAX */
	int val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtoint(nullterm, 0, &val);
	ret = verify_log_merge_wait_timeout_ms(sb, ret, val);
	if (ret == 0) {
		write_seqlock(&optinf->seqlock);
		optinf->opts.log_merge_wait_timeout_ms = val;
		write_sequnlock(&optinf->seqlock);
		ret = count;
	}

	return ret;
}
SCOUTFS_ATTR_RW(log_merge_wait_timeout_ms);

static ssize_t metadev_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%s", opts.metadev_path);
}
SCOUTFS_ATTR_RO(metadev_path);

static ssize_t orphan_scan_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
					 char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%u", opts.orphan_scan_delay_ms);
}
static ssize_t orphan_scan_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[20]; /* more than enough for octal -U32_MAX */
	long val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtol(nullterm, 0, &val);
	if (ret < 0 || val < MIN_ORPHAN_SCAN_DELAY_MS || val > MAX_ORPHAN_SCAN_DELAY_MS) {
		scoutfs_err(sb, "invalid orphan_scan_delay_ms value written to options sysfs file, must be between %lu and %lu",
			    MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
		return -EINVAL;
	}

	write_seqlock(&optinf->seqlock);
	optinf->opts.orphan_scan_delay_ms = val;
	write_sequnlock(&optinf->seqlock);

	scoutfs_inode_schedule_orphan_dwork(sb);

	return count;
}
SCOUTFS_ATTR_RW(orphan_scan_delay_ms);

static ssize_t quorum_heartbeat_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
						char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%llu", opts.quorum_heartbeat_timeout_ms);
}
static ssize_t quorum_heartbeat_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
						 const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_OPTIONS_INFO(sb, optinf);
	char nullterm[30]; /* more than enough for octal -U64_MAX */
	u64 val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtoll(nullterm, 0, &val);
	ret = verify_quorum_heartbeat_timeout_ms(sb, ret, val);
	if (ret == 0) {
		write_seqlock(&optinf->seqlock);
		optinf->opts.quorum_heartbeat_timeout_ms = val;
		write_sequnlock(&optinf->seqlock);
		ret = count;
	}

	return ret;
}
SCOUTFS_ATTR_RW(quorum_heartbeat_timeout_ms);

static ssize_t quorum_slot_nr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct scoutfs_mount_options opts;

	scoutfs_options_read(sb, &opts);

	return snprintf(buf, PAGE_SIZE, "%d\n", opts.quorum_slot_nr);
}
SCOUTFS_ATTR_RO(quorum_slot_nr);

static struct attribute *options_attrs[] = {
	SCOUTFS_ATTR_PTR(data_prealloc_blocks),
	SCOUTFS_ATTR_PTR(data_prealloc_contig_only),
	SCOUTFS_ATTR_PTR(ino_alloc_per_lock),
	SCOUTFS_ATTR_PTR(lock_idle_count),
	SCOUTFS_ATTR_PTR(log_merge_wait_timeout_ms),
	SCOUTFS_ATTR_PTR(metadev_path),
	SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),
	SCOUTFS_ATTR_PTR(quorum_heartbeat_timeout_ms),
	SCOUTFS_ATTR_PTR(quorum_slot_nr),
	NULL,
};

int scoutfs_options_setup(struct super_block *sb)
{
	DECLARE_OPTIONS_INFO(sb, optinf);
	int ret;

	ret = scoutfs_sysfs_create_attrs(sb, &optinf->sysfs_attrs, options_attrs, "mount_options");
	if (ret < 0)
	if (ret)
		scoutfs_options_destroy(sb);
	return ret;
}

/*
 * We remove the sysfs files early in unmount so that they can't try to call other subsystems
 * as they're being destroyed.
 */
void scoutfs_options_stop(struct super_block *sb)
{
	DECLARE_OPTIONS_INFO(sb, optinf);

	if (optinf)
		scoutfs_sysfs_destroy_attrs(sb, &optinf->sysfs_attrs);
}

void scoutfs_options_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	DECLARE_OPTIONS_INFO(sb, optinf);
	struct options_sb_info *osi = sbi->options;

	scoutfs_options_stop(sb);

	if (optinf) {
		free_options(&optinf->opts);
		kfree(optinf);
		sbi->options_info = NULL;
	if (osi) {
		if (osi->debugfs_dir)
			debugfs_remove_recursive(osi->debugfs_dir);
		kfree(osi);
		sbi->options = NULL;
	}
	}

@@ -5,27 +5,23 @@
#include <linux/in.h>
#include "format.h"

struct scoutfs_mount_options {
	u64 data_prealloc_blocks;
	bool data_prealloc_contig_only;
	unsigned int ino_alloc_per_lock;
	int lock_idle_count;
	unsigned int log_merge_wait_timeout_ms;
	char *metadev_path;
	unsigned int orphan_scan_delay_ms;
	int quorum_slot_nr;
	u64 quorum_heartbeat_timeout_ms;
	int tcp_keepalive_timeout_ms;
};

enum scoutfs_mount_options {
	Opt_quorum_slot_nr,
	Opt_metadev_path,
	Opt_err,
};

#define UNRESPONSIVE_PROBES 3

struct mount_options {
	int quorum_slot_nr;
	char *metadev_path;
};

void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts);
int scoutfs_options_show(struct seq_file *seq, struct dentry *root);

int scoutfs_options_early_setup(struct super_block *sb, char *options);
int scoutfs_parse_options(struct super_block *sb, char *options,
			  struct mount_options *parsed);
int scoutfs_options_setup(struct super_block *sb);
void scoutfs_options_stop(struct super_block *sb);
void scoutfs_options_destroy(struct super_block *sb);

u32 scoutfs_option_u32(struct super_block *sb, int token);
#define scoutfs_option_bool scoutfs_option_u32

#endif /* _SCOUTFS_OPTIONS_H_ */
File diff suppressed because it is too large
@@ -2,14 +2,12 @@
#define _SCOUTFS_QUORUM_H_

int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
void scoutfs_quorum_server_shutdown(struct super_block *sb);

u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i,
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
			     struct sockaddr_in *sin);

int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_config *qconf,
				 u64 term);

int scoutfs_quorum_setup(struct super_block *sb);
void scoutfs_quorum_shutdown(struct super_block *sb);
void scoutfs_quorum_destroy(struct super_block *sb);

1266	kmod/src/quota.c
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
#ifndef _SCOUTFS_QUOTA_H_
#define _SCOUTFS_QUOTA_H_

#include "ioctl.h"

/*
 * Each rule's name can be in the ruleset's rbtree associated with the
 * source attr that it selects.  This lets checks only test rules that
 * the inputs could match.  The 'i' field indicates which name is in the
 * tree so we can find the containing rule.
 *
 * This is mostly private to quota.c but we expose it for tracing.
 */
struct squota_rule {
	u64 limit;
	u8 prio;
	u8 op;
	u8 rule_flags;
	struct squota_rule_name {
		struct rb_node node;
		u64 val;
		u8 source;
		u8 flags;
		u8 i;
	} names[3];
};

/* private to quota.c, only here for tracing */
struct squota_input {
	u64 attrs[SQ_NS__NR_SELECT];
	u8 op;
};

int scoutfs_quota_check_inode(struct super_block *sb, struct inode *dir);
int scoutfs_quota_check_data(struct super_block *sb, struct inode *inode);

int scoutfs_quota_get_rules(struct super_block *sb, u64 *iterator,
			    struct scoutfs_ioctl_quota_rule *irules, int nr);
int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
			   struct scoutfs_ioctl_quota_rule *irule);

void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_quota_invalidate(struct super_block *sb);

int scoutfs_quota_setup(struct super_block *sb);
void scoutfs_quota_destroy(struct super_block *sb);

#endif
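The names[] array plus the 'i' index is what lets an rbtree node be mapped back to its containing rule. One plausible way to do that recovery, sketched under the assumption that quota.c does something equivalent (this helper is hypothetical, not the repo's code):

	static struct squota_rule *rule_from_name(struct rb_node *node)
	{
		struct squota_rule_name *name;

		name = rb_entry(node, struct squota_rule_name, node);
		/* back up 'i' slots to names[0], then to the rule */
		return container_of(name - name->i, struct squota_rule, names[0]);
	}
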
305	kmod/src/recov.c
@@ -1,305 +0,0 @@
/*
 * Copyright (C) 2021 Versity Software, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/list_sort.h>

#include "super.h"
#include "recov.h"
#include "cmp.h"

/*
 * There are a few server messages which can't be processed until they
 * know that they have state for all possibly active clients.  These
 * little helpers track which clients have recovered what state and give
 * those message handlers a call to check if recovery has completed.  We
 * track the timeout here, but all we do is call back into the server to
 * take steps to evict timed out clients and then let us know that their
 * recovery has finished.
 */

struct recov_info {
	struct super_block *sb;
	spinlock_t lock;
	struct list_head pending;
	struct timer_list timer;
	void (*timeout_fn)(struct super_block *);
};

#define DECLARE_RECOV_INFO(sb, name) \
	struct recov_info *name = SCOUTFS_SB(sb)->recov_info

struct recov_pending {
	struct list_head head;
	u64 rid;
	int which;
};

static struct recov_pending *next_pending(struct recov_info *recinf, u64 rid, int which)
{
	struct recov_pending *pend;

	list_for_each_entry(pend, &recinf->pending, head) {
		if (pend->rid > rid && pend->which & which)
			return pend;
	}

	return NULL;
}

static struct recov_pending *lookup_pending(struct recov_info *recinf, u64 rid, int which)
{
	struct recov_pending *pend;

	pend = next_pending(recinf, rid - 1, which);
	if (pend && pend->rid == rid)
		return pend;

	return NULL;
}

/*
 * We keep the pending list sorted by rid so that we can iterate over
 * them.  The list should be small and shouldn't be used often.
 */
static int cmp_pending_rid(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
{
	KC_LIST_CMP_CONST struct recov_pending *a = list_entry(A, KC_LIST_CMP_CONST struct recov_pending, head);
	KC_LIST_CMP_CONST struct recov_pending *b = list_entry(B, KC_LIST_CMP_CONST struct recov_pending, head);

	return scoutfs_cmp_u64s(a->rid, b->rid);
}

/*
 * Record that we'll be waiting for a client to recover something.
 * _finish will eventually be called for every _prepare, either
 * because recovery naturally finished or because it timed out and the
 * server evicted the client.
 */
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which)
{
	DECLARE_RECOV_INFO(sb, recinf);
	struct recov_pending *alloc;
	struct recov_pending *pend;

	if (WARN_ON_ONCE(which & SCOUTFS_RECOV_INVALID))
		return -EINVAL;

	alloc = kmalloc(sizeof(*pend), GFP_NOFS);
	if (!alloc)
		return -ENOMEM;

	spin_lock(&recinf->lock);

	pend = lookup_pending(recinf, rid, SCOUTFS_RECOV_ALL);
	if (pend) {
		pend->which |= which;
	} else {
		swap(pend, alloc);
		pend->rid = rid;
		pend->which = which;
		list_add_tail(&pend->head, &recinf->pending);
		list_sort(NULL, &recinf->pending, cmp_pending_rid);
	}

	spin_unlock(&recinf->lock);

	kfree(alloc);
	return 0;
}

/*
 * Recovery is only finished once we've begun (which sets the timer) and
 * all clients have finished.  If we didn't test the timer we could
 * claim it finished prematurely as clients are being prepared.
 */
static int recov_finished(struct recov_info *recinf)
{
	return !!(recinf->timeout_fn != NULL && list_empty(&recinf->pending));
}

static void timer_callback(struct timer_list *timer)
{
	struct recov_info *recinf = from_timer(recinf, timer, timer);

	recinf->timeout_fn(recinf->sb);
}

/*
 * Begin waiting for recovery once we've prepared all the clients.  If
 * the timeout period elapses before _finish is called on all prepared
 * clients then the timer will call the callback.
 *
 * Returns > 0 if all the prepared clients finish recovery before begin
 * is called.
 */
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
			unsigned int timeout_ms)
{
	DECLARE_RECOV_INFO(sb, recinf);
	int ret;

	spin_lock(&recinf->lock);

	recinf->timeout_fn = timeout_fn;
	recinf->timer.expires = jiffies + msecs_to_jiffies(timeout_ms);
	add_timer(&recinf->timer);

	ret = recov_finished(recinf);

	spin_unlock(&recinf->lock);

	if (ret > 0)
		del_timer_sync(&recinf->timer);

	return ret;
}

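Taken together, the intended server flow is prepare-per-client, then begin with a timeout, then finish as each client checks in. A hedged sketch of a caller (the callback and helper names are hypothetical):

	/* during startup, for each client rid found in persistent state */
	ret = scoutfs_recov_prepare(sb, rid, SCOUTFS_RECOV_GREETING);

	/* once everyone is prepared; > 0 means they had all already finished */
	ret = scoutfs_recov_begin(sb, evict_timed_out_clients, timeout_ms);

	/* as each client's greeting arrives */
	if (scoutfs_recov_finish(sb, rid, SCOUTFS_RECOV_GREETING) > 0)
		finished_recovery(sb);	/* e.g. process deferred requests */
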
/*
 * A given client has recovered the given state.  If it's finished all
 * recovery then we free it, and if all clients have finished recovery
 * then we cancel the timeout timer.
 *
 * Returns > 0 if _begin has been called and all clients have finished.
 * The caller will only see > 0 returned once.
 */
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which)
{
	DECLARE_RECOV_INFO(sb, recinf);
	struct recov_pending *pend;
	int ret = 0;

	spin_lock(&recinf->lock);

	pend = lookup_pending(recinf, rid, which);
	if (pend) {
		pend->which &= ~which;
		if (pend->which) {
			pend = NULL;
		} else {
			list_del(&pend->head);
			ret = recov_finished(recinf);
		}
	}

	spin_unlock(&recinf->lock);

	if (ret > 0)
		del_timer_sync(&recinf->timer);

	kfree(pend);

	return ret;
}

/*
 * Returns true if the given client is still trying to recover
 * the given state.
 */
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which)
{
	DECLARE_RECOV_INFO(sb, recinf);
	bool is_pending;

	spin_lock(&recinf->lock);
	is_pending = lookup_pending(recinf, rid, which) != NULL;
	spin_unlock(&recinf->lock);

	return is_pending;
}

/*
 * Return the next rid after the given rid of a client waiting for the
 * given state to be recovered.  Start with rid 0; returns 0 when there
 * are no more clients waiting for recovery.
 *
 * This is inherently racy.  Callers are responsible for reconciling any
 * actions they take based on a pending rid with that recovery
 * finishing, perhaps even before we return.
 */
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which)
{
	DECLARE_RECOV_INFO(sb, recinf);
	struct recov_pending *pend;

	spin_lock(&recinf->lock);
	pend = next_pending(recinf, rid, which);
	rid = pend ? pend->rid : 0;
	spin_unlock(&recinf->lock);

	return rid;
}

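The rid-0 start and 0 terminator give callers a simple iteration idiom over everything still pending:

	for (rid = scoutfs_recov_next_pending(sb, 0, which); rid != 0;
	     rid = scoutfs_recov_next_pending(sb, rid, which)) {
		/* act on rid, tolerating races with its recovery finishing */
	}
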
/*
 * The server is shutting down and doesn't need to worry about recovery
 * anymore.  It'll be built up again by the next server, if needed.
 */
void scoutfs_recov_shutdown(struct super_block *sb)
{
	DECLARE_RECOV_INFO(sb, recinf);
	struct recov_pending *pend;
	struct recov_pending *tmp;
	LIST_HEAD(list);

	del_timer_sync(&recinf->timer);

	spin_lock(&recinf->lock);
	list_splice_init(&recinf->pending, &list);
	recinf->timeout_fn = NULL;
	spin_unlock(&recinf->lock);

	list_for_each_entry_safe(pend, tmp, &list, head) {
		list_del(&pend->head);
		kfree(pend);
	}
}

int scoutfs_recov_setup(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	struct recov_info *recinf;
	int ret;

	recinf = kzalloc(sizeof(struct recov_info), GFP_KERNEL);
	if (!recinf) {
		ret = -ENOMEM;
		goto out;
	}

	recinf->sb = sb;
	spin_lock_init(&recinf->lock);
	INIT_LIST_HEAD(&recinf->pending);
	timer_setup(&recinf->timer, timer_callback, 0);

	sbi->recov_info = recinf;
	ret = 0;
out:
	return ret;
}

void scoutfs_recov_destroy(struct super_block *sb)
{
	DECLARE_RECOV_INFO(sb, recinf);
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (recinf) {
		scoutfs_recov_shutdown(sb);

		kfree(recinf);
		sbi->recov_info = NULL;
	}
}
@@ -1,23 +0,0 @@
#ifndef _SCOUTFS_RECOV_H_
#define _SCOUTFS_RECOV_H_

enum {
	SCOUTFS_RECOV_GREETING = (1 << 0),
	SCOUTFS_RECOV_LOCKS = (1 << 1),

	SCOUTFS_RECOV_INVALID = (~0 << 2),
	SCOUTFS_RECOV_ALL = (~SCOUTFS_RECOV_INVALID),
};

int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which);
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
			unsigned int timeout_ms);
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which);
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which);
void scoutfs_recov_shutdown(struct super_block *sb);

int scoutfs_recov_setup(struct super_block *sb);
void scoutfs_recov_destroy(struct super_block *sb);

#endif
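
For orientation, here is a rough sketch of how a new server might drive
the recovery API above. The helper names and the 30 second timeout are
invented for illustration; the real callers live in server.c, whose
diff is suppressed below:

	/* hypothetical timeout callback: some prepared clients never finished */
	static void example_recov_timeout(struct super_block *sb)
	{
		/* fence or evict the rids still returned by _next_pending() */
	}

	static int example_start_recovery(struct super_block *sb, u64 *rids, int nr)
	{
		int ret;
		int i;

		/* record every client that must recover before we serve */
		for (i = 0; i < nr; i++) {
			ret = scoutfs_recov_prepare(sb, rids[i], SCOUTFS_RECOV_ALL);
			if (ret < 0)
				return ret;
		}

		/* arm the timer; > 0 means everyone finished before begin */
		ret = scoutfs_recov_begin(sb, example_recov_timeout, 30 * MSEC_PER_SEC);
		return ret > 0 ? 0 : ret;
	}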
kmod/src/server.c | 3860
(file diff suppressed because it is too large)
@@ -56,31 +56,22 @@ do { \
		__entry->name##_data_len, __entry->name##_cmd, __entry->name##_flags, \
		__entry->name##_error

u64 scoutfs_server_reserved_meta_blocks(struct super_block *sb);

int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
				struct scoutfs_net_lock *nl);
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
				 struct scoutfs_net_lock *nl);
				 struct scoutfs_net_lock_grant_response *gr);
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
					struct scoutfs_key *key);
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
void scoutfs_server_get_roots(struct super_block *sb,
			      struct scoutfs_net_roots *roots);
int scoutfs_server_hold_commit(struct super_block *sb);
int scoutfs_server_apply_commit(struct super_block *sb, int err);

int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
				     struct scoutfs_open_ino_map_args *args);
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
				      struct scoutfs_open_ino_map *map, int err);

u64 scoutfs_server_seq(struct super_block *sb);
u64 scoutfs_server_next_seq(struct super_block *sb);
void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);

void scoutfs_server_start(struct super_block *sb, struct scoutfs_quorum_config *qconf, u64 term);
struct sockaddr_in;
struct scoutfs_quorum_elected_info;
int scoutfs_server_start(struct super_block *sb, u64 term);
void scoutfs_server_abort(struct super_block *sb);
void scoutfs_server_stop(struct super_block *sb);
void scoutfs_server_stop_wait(struct super_block *sb);
bool scoutfs_server_is_running(struct super_block *sb);
bool scoutfs_server_is_up(struct super_block *sb);
bool scoutfs_server_is_down(struct super_block *sb);

int scoutfs_server_setup(struct super_block *sb);
void scoutfs_server_destroy(struct super_block *sb);
@@ -1,45 +0,0 @@
#!/bin/bash

#
# Unfortunately, kernels can ship which contain sparse errors that are
# unrelated to us.
#
# The exit status of this filtering wrapper will indicate an error if
# sparse wasn't found or if there were any unfiltered output lines. It
# can hide error exit status from sparse or grep if they don't produce
# output that makes it past the filters.
#

# must have sparse. Fail with error message, mask success path.
which sparse > /dev/null || exit 1

# initial unmatchable, additional added as RE+="|..."
RE="$^"

#
# Darn. sparse has multi-line error messages, and I'd rather not bother
# with multi-line filters. So we'll just drop this context.
#
# command-line: note: in included file (through include/linux/netlink.h, include/linux/ethtool.h, include/linux/netdevice.h, include/net/sock.h, /root/scoutfs/kmod/src/kernelcompat.h, builtin):
#	fprintf(stderr, "%s: note: in included file%s:\n",
#
RE+="|: note: in included file"

# 3.10.0-1160.119.1.el7.x86_64.debug
# include/linux/posix_acl.h:138:9: warning: incorrect type in assignment (different address spaces)
# include/linux/posix_acl.h:138:9:    expected struct posix_acl *<noident>
# include/linux/posix_acl.h:138:9:    got struct posix_acl [noderef] <asn:4>*<noident>
RE+="|include/linux/posix_acl.h:"

# 3.10.0-1160.119.1.el7.x86_64.debug
#include/uapi/linux/perf_event.h:146:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
RE+="|include/uapi/linux/perf_event.h:"

# 4.18.0-513.24.1.el8_9.x86_64+debug'
#./include/linux/skbuff.h:824:1: warning: directive in macro's argument list
RE+="|include/linux/skbuff.h:"

sparse "$@" |& \
	grep -E -v "($RE)" |& \
	awk '{ print $0 } END { exit NR > 0 }'
exit $?
kmod/src/srch.c | 298
@@ -18,7 +18,6 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <asm/unaligned.h>

#include "super.h"
#include "format.h"
@@ -29,11 +28,7 @@
#include "btree.h"
#include "spbm.h"
#include "client.h"
#include "counters.h"
#include "scoutfs_trace.h"
#include "triggers.h"
#include "sysfs.h"
#include "msg.h"

/*
 * This srch subsystem gives us a way to find inodes that have a given
@@ -62,7 +57,7 @@
 * re-allocated and re-written. Search can restart by checking the
 * btree for the current set of files. Compaction reads log files which
 * are protected from other compactions by the persistent busy items
 * created by the server. Compaction won't see its blocks reused out
 * created by the server. Compaction won't see it's blocks reused out
 * from under it, but it can encounter stale cached blocks that need to
 * be invalidated.
 */
@@ -72,14 +67,10 @@ struct srch_info {
	atomic_t shutdown;
	struct workqueue_struct *workq;
	struct delayed_work compact_dwork;
	struct scoutfs_sysfs_attrs ssa;
	atomic_t compact_delay_ms;
};

#define DECLARE_SRCH_INFO(sb, name) \
	struct srch_info *name = SCOUTFS_SB(sb)->srch_info
#define DECLARE_SRCH_INFO_KOBJ(kobj, name) \
	DECLARE_SRCH_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)

#define SRE_FMT "%016llx.%llu.%llu"
#define SRE_ARG(sre) \
@@ -442,10 +433,6 @@ out:
	if (ret == 0 && (flags & GFB_INSERT) && blk >= le64_to_cpu(sfl->blocks))
		sfl->blocks = cpu_to_le64(blk + 1);

	if (bl) {
		trace_scoutfs_get_file_block(sb, bl->blkno, flags);
	}

	*bl_ret = bl;
	return ret;
}
@@ -532,87 +519,6 @@ out:
	return ret;
}

/*
 * Padded entries are encoded in pairs after an existing entry. All of
 * the pairs cancel each other out by all readers (the second encoding
 * looks like deletion) so they aren't visible to the first/last bounds of
 * the block or file.
 *
 * We use the same entry repeatedly, so the diff between them will be empty.
 * This lets us just emit the two-byte count word, leaving the other bytes
 * as zero.
 *
 * Split the desired total len into two pieces, adding any remainder to the
 * first four-bit value.
 */
static void append_padded_entry(struct scoutfs_srch_file *sfl,
				struct scoutfs_srch_block *srb,
				int len)
{
	int each;
	int rem;
	u16 lengths = 0;
	u8 *buf = srb->entries + le32_to_cpu(srb->entry_bytes);

	each = (len - 2) >> 1;
	rem = (len - 2) & 1;

	lengths |= each + rem;
	lengths |= each << 4;

	memset(buf, 0, len);
	put_unaligned_le16(lengths, buf);

	le32_add_cpu(&srb->entry_nr, 1);
	le32_add_cpu(&srb->entry_bytes, len);
	le64_add_cpu(&sfl->entries, 1);
}

/*
 * This is called by a testing trigger to create a very specific case of
 * encoded entry offsets. We want the last entry in the block to start
 * precisely at the _SAFE_BYTES offset.
 *
 * This is called when there is a single existing entry in the block.
 * We have the entire block to work with. We encode pairs of matching
 * entries. This hides them from readers (both searches and merging) as
 * they're interpreted as creation and deletion and are deleted.
 *
 * For simplicity and to maintain sort ordering within the block, we reuse
 * the existing entry. This lets us skip the encoding step, because we know
 * the diff will be zero. We can zero-pad the resulting entries to hit the
 * target offset exactly.
 *
 * Because we can't predict the exact number of entry_bytes when we start,
 * we adjust the byte count of subsequent entries until we wind up at a
 * multiple of 20 bytes away from our goal and then use that length for
 * the remaining entries.
 *
 * We could just use a single pair of unnaturally large entries to consume
 * the needed space, adjusting for an odd number of entry_bytes if necessary.
 * The use of 19 or 20 bytes for the entry pair matches what we would see with
 * real (non-zero) entries that vary from the existing entry.
 */
static void pad_entries_at_safe(struct scoutfs_srch_file *sfl,
				struct scoutfs_srch_block *srb)
{
	u32 target;
	s32 diff;

	target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;

	while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
		append_padded_entry(sfl, srb, 10);
		if (diff % 20 == 0) {
			append_padded_entry(sfl, srb, 10);
		} else {
			append_padded_entry(sfl, srb, 9);
		}
	}

	WARN_ON_ONCE(diff != 0);
}
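
To make the length split in append_padded_entry() concrete, a small
worked example (an illustration mirroring the arithmetic above, not
code from the diff):

	/* a 9 byte padded entry: 2 count bytes + 4 + 3 zero diff bytes */
	int len = 9;
	int each = (len - 2) >> 1;			/* 3 */
	int rem = (len - 2) & 1;			/* 1 */
	u16 lengths = (each + rem) | (each << 4);	/* low nibble 4, high nibble 3 */

A reader decodes the two-byte count word and then consumes 4 + 3 bytes
of zero diff, exactly the 9 bytes that were appended.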
/*
 * The caller is dropping an ino/id because the tracking rbtree is full.
 * This loses information so we can't return any entries at or after the
@@ -745,14 +651,14 @@ static int search_log_file(struct super_block *sb,
	for (i = 0; i < le32_to_cpu(srb->entry_nr); i++) {
		if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			ret = EIO;
			break;
		}

		ret = decode_entry(srb->entries + pos, &sre, &prev);
		if (ret <= 0) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			ret = EIO;
			break;
		}
		pos += ret;
@@ -855,15 +761,15 @@ static int search_sorted_file(struct super_block *sb,

		if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			goto out;
			ret = EIO;
			break;
		}

		ret = decode_entry(srb->entries + pos, &sre, &prev);
		if (ret <= 0) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			goto out;
			ret = EIO;
			break;
		}
		pos += ret;
		prev = sre;
@@ -954,6 +860,7 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
			       struct scoutfs_srch_rb_root *sroot,
			       u64 hash, u64 ino, u64 last_ino, bool *done)
{
	struct scoutfs_net_roots prev_roots;
	struct scoutfs_net_roots roots;
	struct scoutfs_srch_entry start;
	struct scoutfs_srch_entry end;
@@ -961,17 +868,15 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
	struct scoutfs_log_trees lt;
	struct scoutfs_srch_file sfl;
	SCOUTFS_BTREE_ITEM_REF(iref);
	DECLARE_SAVED_REFS(saved);
	struct scoutfs_key key;
	unsigned long limit = SRCH_LIMIT;
	int ret;

	scoutfs_inc_counter(sb, srch_search_xattrs);

	trace_scoutfs_ioc_search_xattrs(sb, ino, last_ino);

	*done = false;
	srch_init_rb_root(sroot);
	memset(&prev_roots, 0, sizeof(prev_roots));

	start.hash = cpu_to_le64(hash);
	start.ino = cpu_to_le64(ino);
@@ -986,6 +891,7 @@ retry:
	ret = scoutfs_client_get_roots(sb, &roots);
	if (ret)
		goto out;
	memset(&roots.fs_root, 0, sizeof(roots.fs_root));

	end = final;

@@ -1061,10 +967,16 @@ retry:
	*done = sre_cmp(&end, &final) == 0;
	ret = 0;
out:
	ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.srch_root.ref,
					&roots.logs_root.ref);
	if (ret == -ESTALE)
		goto retry;
	if (ret == -ESTALE) {
		if (memcmp(&prev_roots, &roots, sizeof(roots)) == 0) {
			scoutfs_inc_counter(sb, srch_search_stale_eio);
			ret = -EIO;
		} else {
			scoutfs_inc_counter(sb, srch_search_stale_retry);
			prev_roots = roots;
			goto retry;
		}
	}

	return ret;
}
@@ -1077,30 +989,18 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
			    struct scoutfs_block_writer *wri,
			    struct scoutfs_btree_root *root,
			    struct scoutfs_srch_file *sfl, bool force)
			    struct scoutfs_srch_file *sfl)
{
	struct scoutfs_key key;
	int ret;

	if (sfl->ref.blkno && !force && scoutfs_trigger(sb, SRCH_FORCE_LOG_ROTATE))
		force = true;

	if (sfl->ref.blkno == 0 ||
	    (!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
	if (le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT)
		return 0;

	init_srch_key(&key, SCOUTFS_SRCH_LOG_TYPE,
		      le64_to_cpu(sfl->ref.blkno), 0);
	ret = scoutfs_btree_insert(sb, alloc, wri, root, &key,
				   sfl, sizeof(*sfl));
	/*
	 * While it's fine to replay moving the client's logging srch
	 * file to the core btree item, server commits should keep it
	 * from happening. So we'll warn if we see it happen. This can
	 * be removed eventually.
	 */
	if (WARN_ON_ONCE(ret == -EEXIST))
		ret = 0;
	if (ret == 0) {
		memset(sfl, 0, sizeof(*sfl));
		scoutfs_inc_counter(sb, srch_rotate_log);
@@ -1406,7 +1306,7 @@ int scoutfs_srch_commit_compact(struct super_block *sb,
			ret = -EIO;
		scoutfs_btree_put_iref(&iref);
	}
	if (ret < 0)
	if (ret < 0) /* XXX leaks allocators */
		goto out;

	/* restore busy to pending if the operation failed */
@@ -1426,8 +1326,10 @@ int scoutfs_srch_commit_compact(struct super_block *sb,
	/* update file references if we finished compaction (!deleting) */
	if (!(res->flags & SCOUTFS_SRCH_COMPACT_FLAG_DELETE)) {
		ret = commit_files(sb, alloc, wri, root, res);
		if (ret < 0)
		if (ret < 0) {
			/* XXX we can't commit, shutdown? */
			goto out;
		}

		/* transition flags for deleting input files */
		for (i = 0; i < res->nr; i++) {
@@ -1454,7 +1356,7 @@ update:
			      le64_to_cpu(pending->id), 0);
		ret = scoutfs_btree_insert(sb, alloc, wri, root, &key,
					   pending, sizeof(*pending));
		if (WARN_ON_ONCE(ret < 0)) /* XXX inconsistency */
		if (ret < 0)
			goto out;
	}

@@ -1467,6 +1369,7 @@ update:
		BUG_ON(err); /* both busy and pending present */
	}
out:
	WARN_ON_ONCE(ret < 0); /* XXX inconsistency */
	kfree(busy);
	return ret;
}
@@ -1557,7 +1460,7 @@ static int kway_merge(struct super_block *sb,
		      struct scoutfs_block_writer *wri,
		      struct scoutfs_srch_file *sfl,
		      kway_get_t kway_get, kway_advance_t kway_adv,
		      void **args, int nr, bool logs_input)
		      void **args, int nr)
{
	DECLARE_SRCH_INFO(sb, srinf);
	struct scoutfs_srch_block *srb = NULL;
@@ -1577,14 +1480,14 @@ static int kway_merge(struct super_block *sb,
	int ind;
	int i;

	if (WARN_ON_ONCE(nr <= 0))
	if (WARN_ON_ONCE(nr <= 1))
		return -EINVAL;

	/* always at least one parent for single leaf */
	nr_parents = max_t(unsigned long, 1, roundup_pow_of_two(nr) - 1);
	nr_parents = roundup_pow_of_two(nr) - 1;
	/* root at [1] for easy sib/parent index calc, final pad for odd sib */
	nr_nodes = 1 + nr_parents + nr + 1;
	tnodes = kc__vmalloc(nr_nodes * sizeof(struct tourn_node), GFP_NOFS);
	tnodes = __vmalloc(nr_nodes * sizeof(struct tourn_node),
			   GFP_NOFS, PAGE_KERNEL);
	if (!tnodes)
		return -ENOMEM;
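
The "root at [1]" sizing above implies the usual implicit binary tree
index arithmetic; a sketch of the relations (inferred from the comment,
since kway_merge's traversal itself isn't shown in this hunk):

	/* with the root at tnodes[1], for any tournament node index i > 1 */
	static inline unsigned long tourn_parent(unsigned long i)
	{
		return i >> 1;		/* match winners propagate upward */
	}

	static inline unsigned long tourn_sibling(unsigned long i)
	{
		return i ^ 1;		/* the opposing contestant in this match */
	}

The extra trailing node in nr_nodes pads an odd number of contestants so
that every node's i ^ 1 sibling is a valid, comparable element.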
@@ -1661,15 +1564,6 @@ static int kway_merge(struct super_block *sb,
				blk++;
			}

			/* end sorted block on _SAFE offset for testing */
			if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
			    scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
				pad_entries_at_safe(sfl, srb);
				scoutfs_block_put(sb, bl);
				bl = NULL;
				blk++;
			}

			scoutfs_inc_counter(sb, srch_compact_entry);

		} else {
@@ -1712,8 +1606,6 @@ static int kway_merge(struct super_block *sb,
			empty++;
			ret = 0;
		} else if (ret < 0) {
			if (ret == -ENOANO) /* just testing trigger */
				ret = 0;
			goto out;
		}

@@ -1797,7 +1689,7 @@ static void swap_page_sre(void *A, void *B, int size)
 * typically, ~10x worst case).
 *
 * Because we read and sort all the input files we must perform the full
 * compaction in one operation. The server must have given us
 * compaction in one operation. The server must have given us a
 * sufficiently large avail/freed lists, otherwise we'll return ENOSPC.
 */
static int compact_logs(struct super_block *sb,
@@ -1852,7 +1744,7 @@ static int compact_logs(struct super_block *sb,
			goto out;
		}
		page->private = 0;
		list_add_tail(&page->lru, &pages);
		list_add_tail(&page->list, &pages);
		nr_pages++;
		scoutfs_inc_counter(sb, srch_compact_log_page);
	}
@@ -1861,14 +1753,14 @@ static int compact_logs(struct super_block *sb,

		if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			goto out;
			ret = EIO;
			break;
		}

		ret = decode_entry(srb->entries + pos, sre, &prev);
		if (ret <= 0) {
			/* can only be inconsistency :/ */
			ret = -EIO;
			ret = EIO;
			goto out;
		}
		prev = *sre;
@@ -1905,7 +1797,7 @@ static int compact_logs(struct super_block *sb,

	/* sort page entries and reset private for _next */
	i = 0;
	list_for_each_entry(page, &pages, lru) {
	list_for_each_entry(page, &pages, list) {
		args[i++] = page;

		if (atomic_read(&srinf->shutdown)) {
@@ -1921,12 +1813,12 @@ static int compact_logs(struct super_block *sb,
	}

	ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_page, kway_adv_page,
			 args, nr_pages, true);
			 args, nr_pages);
	if (ret < 0)
		goto out;

	/* make sure we finished all the pages */
	list_for_each_entry(page, &pages, lru) {
	list_for_each_entry(page, &pages, list) {
		sre = page_priv_sre(page);
		if (page->private < SRES_PER_PAGE && sre->ino != 0) {
			ret = -ENOSPC;
@@ -1939,8 +1831,8 @@ static int compact_logs(struct super_block *sb,
out:
	scoutfs_block_put(sb, bl);
	vfree(args);
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del(&page->list);
		__free_page(page);
	}

@@ -1979,18 +1871,12 @@ static int kway_get_reader(struct super_block *sb,
	srb = rdr->bl->data;

	if (rdr->pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
	    rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
	    rdr->skip >= SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
	    rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
		/* XXX inconsistency */
		return -EIO;
	}

	if (rdr->decoded_bytes == 0 && rdr->pos == SCOUTFS_SRCH_BLOCK_SAFE_BYTES &&
	    scoutfs_trigger(sb, SRCH_MERGE_STOP_SAFE)) {
		/* only used in testing */
		return -ENOANO;
	}

	/* decode entry, possibly skipping start of the block */
	while (rdr->decoded_bytes == 0 || rdr->pos < rdr->skip) {
		ret = decode_entry(srb->entries + rdr->pos,
@@ -2080,7 +1966,7 @@ static int compact_sorted(struct super_block *sb,
	}

	ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_reader,
			 kway_adv_reader, args, nr, false);
			 kway_adv_reader, args, nr);

	sc->flags |= SCOUTFS_SRCH_COMPACT_FLAG_DONE;
	for (i = 0; i < nr; i++) {
@@ -2194,7 +2080,7 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
			struct scoutfs_block_writer *wri,
			struct scoutfs_srch_compact *sc)
{
	int ret = 0;
	int ret;
	int i;

	for (i = 0; i < sc->nr; i++) {
@@ -2209,15 +2095,8 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
	return ret;
}

static void queue_compact_work(struct srch_info *srinf, bool immediate)
{
	unsigned long delay;

	if (!atomic_read(&srinf->shutdown)) {
		delay = immediate ? 0 : msecs_to_jiffies(atomic_read(&srinf->compact_delay_ms));
		queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
	}
}
/* wait 10s between compact attempts on error, immediate after success */
#define SRCH_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)

/*
 * Get a compaction operation from the server, sort the entries from the
@@ -2245,8 +2124,8 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
	struct super_block *sb = srinf->sb;
	struct scoutfs_block_writer wri;
	struct scoutfs_alloc alloc;
	unsigned long delay;
	int ret;
	int err;

	sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
	if (sc == NULL) {
@@ -2257,8 +2136,6 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
	scoutfs_block_writer_init(sb, &wri);

	ret = scoutfs_client_srch_get_compact(sb, sc);
	if (ret >= 0)
		trace_scoutfs_srch_compact_client_recv(sb, sc);
	if (ret < 0 || sc->nr == 0)
		goto out;

@@ -2276,77 +2153,30 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
	} else {
		ret = -EINVAL;
	}
	if (ret < 0)
		goto commit;

	scoutfs_alloc_prepare_commit(sb, &alloc, &wri);
	if (ret == 0)
		scoutfs_block_writer_write(sb, &wri);

	ret = scoutfs_block_writer_write(sb, &wri);
commit:
	/* the server won't use our partial compact if _ERROR is set */
	sc->meta_avail = alloc.avail;
	sc->meta_freed = alloc.freed;
	sc->flags |= ret < 0 ? SCOUTFS_SRCH_COMPACT_FLAG_ERROR : 0;

	trace_scoutfs_srch_compact_client_send(sb, sc);
	err = scoutfs_client_srch_commit_compact(sb, sc);
	if (err < 0 && ret == 0)
		ret = err;
	ret = scoutfs_client_srch_commit_compact(sb, sc);
out:
	/* our allocators and files should be stable */
	WARN_ON_ONCE(ret == -ESTALE);
	if (ret < 0)
		scoutfs_inc_counter(sb, srch_compact_error);

	scoutfs_block_writer_forget_all(sb, &wri);
	queue_compact_work(srinf, sc != NULL && sc->nr > 0 && ret == 0);
	if (!atomic_read(&srinf->shutdown)) {
		delay = ret == 0 ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
		queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
	}

	kfree(sc);
}

static ssize_t compact_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	DECLARE_SRCH_INFO_KOBJ(kobj, srinf);

	return snprintf(buf, PAGE_SIZE, "%u", atomic_read(&srinf->compact_delay_ms));
}

#define MIN_COMPACT_DELAY_MS MSEC_PER_SEC
#define DEF_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)
#define MAX_COMPACT_DELAY_MS (60 * MSEC_PER_SEC)

static ssize_t compact_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	DECLARE_SRCH_INFO(sb, srinf);
	char nullterm[30]; /* more than enough for octal -U64_MAX */
	u64 val;
	int len;
	int ret;

	len = min(count, sizeof(nullterm) - 1);
	memcpy(nullterm, buf, len);
	nullterm[len] = '\0';

	ret = kstrtoll(nullterm, 0, &val);
	if (ret < 0 || val < MIN_COMPACT_DELAY_MS || val > MAX_COMPACT_DELAY_MS) {
		scoutfs_err(sb, "invalid compact_delay_ms value, must be between %lu and %lu",
			    MIN_COMPACT_DELAY_MS, MAX_COMPACT_DELAY_MS);
		return -EINVAL;
	}

	atomic_set(&srinf->compact_delay_ms, val);
	cancel_delayed_work(&srinf->compact_dwork);
	queue_compact_work(srinf, false);

	return count;
}
SCOUTFS_ATTR_RW(compact_delay_ms);

static struct attribute *srch_attrs[] = {
	SCOUTFS_ATTR_PTR(compact_delay_ms),
	NULL,
};

void scoutfs_srch_destroy(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
@@ -2363,8 +2193,6 @@ void scoutfs_srch_destroy(struct super_block *sb)
		destroy_workqueue(srinf->workq);
	}

	scoutfs_sysfs_destroy_attrs(sb, &srinf->ssa);

	kfree(srinf);
	sbi->srch_info = NULL;
}
@@ -2382,15 +2210,8 @@ int scoutfs_srch_setup(struct super_block *sb)
	srinf->sb = sb;
	atomic_set(&srinf->shutdown, 0);
	INIT_DELAYED_WORK(&srinf->compact_dwork, scoutfs_srch_compact_worker);
	scoutfs_sysfs_init_attrs(sb, &srinf->ssa);
	atomic_set(&srinf->compact_delay_ms, DEF_COMPACT_DELAY_MS);

	sbi->srch_info = srinf;

	ret = scoutfs_sysfs_create_attrs(sb, &srinf->ssa, srch_attrs, "srch");
	if (ret < 0)
		goto out;

	srinf->workq = alloc_workqueue("scoutfs_srch_compact",
				       WQ_NON_REENTRANT | WQ_UNBOUND |
				       WQ_HIGHPRI, 0);
@@ -2399,7 +2220,8 @@ int scoutfs_srch_setup(struct super_block *sb)
		goto out;
	}

	queue_compact_work(srinf, false);
	queue_delayed_work(srinf->workq, &srinf->compact_dwork,
			   msecs_to_jiffies(SRCH_COMPACT_DELAY_MS));

	ret = 0;
out:

@@ -37,7 +37,7 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
			    struct scoutfs_alloc *alloc,
			    struct scoutfs_block_writer *wri,
			    struct scoutfs_btree_root *root,
			    struct scoutfs_srch_file *sfl, bool force);
			    struct scoutfs_srch_file *sfl);
int scoutfs_srch_get_compact(struct super_block *sb,
			     struct scoutfs_alloc *alloc,
			     struct scoutfs_block_writer *wri,
kmod/src/super.c | 402
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/magic.h>
@@ -21,6 +20,7 @@
#include <linux/statfs.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/percpu.h>

#include "super.h"
#include "block.h"
@@ -44,45 +44,70 @@
#include "srch.h"
#include "item.h"
#include "alloc.h"
#include "recov.h"
#include "omap.h"
#include "volopt.h"
#include "fence.h"
#include "xattr.h"
#include "wkic.h"
#include "quota.h"
#include "scoutfs_trace.h"

static struct dentry *scoutfs_debugfs_root;

/* the statfs file fields can be small (and signed?) :/ */
static __statfs_word saturate_truncated_word(u64 files)
{
	__statfs_word word = files;
static DEFINE_PER_CPU(u64, clock_sync_ids) = 0;

	if (word != files) {
		word = ~0ULL;
		if (word < 0)
			word = (unsigned long)word >> 1;
/*
 * Give the caller a unique clock sync id for a message they're about to
 * send. We make the ids reasonably globally unique by using randomly
 * initialized per-cpu 64bit counters.
 */
__le64 scoutfs_clock_sync_id(void)
{
	u64 rnd = 0;
	u64 ret;
	u64 *id;

retry:
	preempt_disable();
	id = this_cpu_ptr(&clock_sync_ids);
	if (*id == 0) {
		if (rnd == 0) {
			preempt_enable();
			get_random_bytes(&rnd, sizeof(rnd));
			goto retry;
		}
		*id = rnd;
	}

	return word;
	ret = ++(*id);
	preempt_enable();

	return cpu_to_le64(ret);
}
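
A hypothetical caller of the new helper, just to show the intended
shape (the struct and function here are invented, not from the diff):

	struct example_net_header {
		__le64 clock_sync_id;
	};

	/* stamp a reasonably globally unique id into an outgoing message */
	static void example_stamp_header(struct example_net_header *hdr)
	{
		hdr->clock_sync_id = scoutfs_clock_sync_id();
	}

Note that a per-cpu counter value of 0 means "not yet randomized", so
the helper re-rolls with preemption enabled until it can seed a nonzero
starting value.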
struct statfs_free_blocks {
	u64 meta;
	u64 data;
};

static int count_free_blocks(struct super_block *sb, void *arg, int owner,
			     u64 id, bool meta, bool avail, u64 blocks)
{
	struct statfs_free_blocks *sfb = arg;

	if (meta)
		sfb->meta += blocks;
	else
		sfb->data += blocks;

	return 0;
}

/*
 * The server gives us the current sum of free blocks and the total
 * inode count that it can see across all the clients' log trees. It
 * won't see allocations and inode creations or deletions that are dirty
 * in client memory as it builds a transaction.
 * Build the free block counts by having alloc read all the persistent
 * blocks which contain allocators and calling us for each of them.
 * Only the super block reads aren't cached so repeatedly calling statfs
 * is like repeated O_DIRECT IO. We can add a cache and stale results
 * if that IO becomes a problem.
 *
 * We don't have static limits on the number of files so the statfs
 * fields for the total possible files and the number free isn't
 * particularly helpful. What we do want to report is the number of
 * inodes, so we fake a max possible number of inodes given a
 * conservative estimate of the total space consumption per file and
 * then find the free by subtracting our precise count of active inodes.
 * This seems like the least surprising compromise where the file max
 * doesn't change and the caller gets the correct count of used inodes.
 * We fake the number of free inodes value by assuming that we can fill
 * free blocks with a certain number of inodes. We then add the number of
 * current inodes to that free count to determine the total possible
 * inodes.
 *
 * The fsid that we report is constructed from the xor of the first two
 * and second two little endian u32s that make up the uuid bytes.
@@ -90,33 +115,41 @@ static __statfs_word saturate_truncated_word(u64 files)
static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct scoutfs_net_statfs nst;
	u64 files;
	u64 ffree;
	struct scoutfs_super_block *super = NULL;
	struct statfs_free_blocks sfb = {0,};
	__le32 uuid[4];
	int ret;

	scoutfs_inc_counter(sb, statfs);

	ret = scoutfs_client_statfs(sb, &nst);
	super = kzalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
	if (!super) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scoutfs_read_super(sb, super);
	if (ret)
		goto out;

	kst->f_bfree = (le64_to_cpu(nst.free_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
		       le64_to_cpu(nst.free_data_blocks);
	ret = scoutfs_alloc_foreach(sb, count_free_blocks, &sfb);
	if (ret < 0)
		goto out;

	kst->f_bfree = (sfb.meta << SCOUTFS_BLOCK_SM_LG_SHIFT) + sfb.data;
	kst->f_type = SCOUTFS_SUPER_MAGIC;
	kst->f_bsize = SCOUTFS_BLOCK_SM_SIZE;
	kst->f_blocks = (le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
			le64_to_cpu(nst.total_data_blocks);
	kst->f_blocks = (le64_to_cpu(super->total_meta_blocks) <<
			 SCOUTFS_BLOCK_SM_LG_SHIFT) +
			le64_to_cpu(super->total_data_blocks);
	kst->f_bavail = kst->f_bfree;

	files = div_u64(le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_LG_SHIFT, 2048);
	ffree = files - le64_to_cpu(nst.inode_count);
	kst->f_files = saturate_truncated_word(files);
	kst->f_ffree = saturate_truncated_word(ffree);
	/* arbitrarily assume ~1K / empty file */
	kst->f_ffree = sfb.meta * (SCOUTFS_BLOCK_LG_SIZE / 1024);
	kst->f_files = kst->f_ffree + le64_to_cpu(super->next_ino);

	BUILD_BUG_ON(sizeof(uuid) != sizeof(nst.uuid));
	memcpy(uuid, nst.uuid, sizeof(uuid));
	BUILD_BUG_ON(sizeof(uuid) != sizeof(super->uuid));
	memcpy(uuid, super->uuid, sizeof(uuid));
	kst->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[1]);
	kst->f_fsid.val[1] = le32_to_cpu(uuid[2]) ^ le32_to_cpu(uuid[3]);
	kst->f_namelen = SCOUTFS_NAME_LEN;
@@ -125,17 +158,57 @@ static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
	/* the vfs fills f_flags */
	ret = 0;
out:
	kfree(super);

	/*
	 * We don't take cluster locks in statfs which makes it a very
	 * convenient place to trigger lock reclaim for debugging. We
	 * try to free as many locks as possible.
	 */
	if (scoutfs_trigger(sb, STATFS_LOCK_PURGE))
		scoutfs_free_unused_locks(sb);
		scoutfs_free_unused_locks(sb, -1UL);

	return ret;
}
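
To make the "~1K per empty file" estimate concrete (arithmetic only;
the 1000-block figure is an invented sample, and this assumes the usual
64KiB large metadata block so SCOUTFS_BLOCK_LG_SIZE / 1024 == 64):

	u64 free_meta = 1000;				/* sample free LG blocks */
	u64 ffree = free_meta * (65536 / 1024);		/* 64,000 creatable inodes */
	/* f_files = ffree + next_ino, so used = f_files - f_ffree reads sanely */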
static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

	if (opts->quorum_slot_nr >= 0)
		seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
	seq_printf(seq, ",metadev_path=%s", opts->metadev_path);

	return 0;
}

static ssize_t metadev_path_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

	return snprintf(buf, PAGE_SIZE, "%s", opts->metadev_path);
}
SCOUTFS_ATTR_RO(metadev_path);

static ssize_t quorum_server_nr_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
	struct mount_options *opts = &SCOUTFS_SB(sb)->opts;

	return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
}
SCOUTFS_ATTR_RO(quorum_server_nr);

static struct attribute *mount_options_attrs[] = {
	SCOUTFS_ATTR_PTR(metadev_path),
	SCOUTFS_ATTR_PTR(quorum_server_nr),
	NULL,
};

static int scoutfs_sync_fs(struct super_block *sb, int wait)
{
	trace_scoutfs_sync_fs(sb, wait);
@@ -153,25 +226,7 @@ static void scoutfs_metadev_close(struct super_block *sb)
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (sbi->meta_bdev) {
		/*
		 * Some kernels have blkdev_reread_part which calls
		 * fsync_bdev while holding the bd_mutex which inverts
		 * the s_umount hold in deactivate_super and blkdev_put
		 * from kill_sb->put_super.
		 */
		lockdep_off();

#ifdef KC_BDEV_FILE_OPEN_BY_PATH
		bdev_fput(sbi->meta_bdev_file);
#else
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
		blkdev_put(sbi->meta_bdev, sb);
#else
		blkdev_put(sbi->meta_bdev, SCOUTFS_META_BDEV_MODE);
#endif
#endif

		lockdep_on();
		sbi->meta_bdev = NULL;
	}
}
@@ -188,69 +243,41 @@ static void scoutfs_put_super(struct super_block *sb)

	trace_scoutfs_put_super(sb);

	/*
	 * Wait for invalidation and iput to finish with any lingering
	 * inode references that escaped the evict_inodes in
	 * generic_shutdown_super. SB_ACTIVE is clear so final iput
	 * will always evict.
	 */
	scoutfs_lock_flush_invalidate(sb);
	scoutfs_inode_flush_iput(sb);
	WARN_ON_ONCE(!list_empty(&sb->s_inodes));
	sbi->shutdown = true;

	scoutfs_forest_stop(sb);
	scoutfs_data_destroy(sb);
	scoutfs_srch_destroy(sb);

	scoutfs_lock_shutdown(sb);
	scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
	sbi->rid_lock = NULL;

	scoutfs_shutdown_trans(sb);
	scoutfs_volopt_destroy(sb);
	scoutfs_client_destroy(sb);
	scoutfs_quota_destroy(sb);
	scoutfs_inode_destroy(sb);
	scoutfs_wkic_destroy(sb);
	scoutfs_item_destroy(sb);
	scoutfs_forest_destroy(sb);
	scoutfs_data_destroy(sb);

	scoutfs_quorum_destroy(sb);
	scoutfs_lock_shutdown(sb);
	scoutfs_server_destroy(sb);
	scoutfs_recov_destroy(sb);
	scoutfs_net_destroy(sb);
	scoutfs_lock_destroy(sb);
	scoutfs_omap_destroy(sb);

	scoutfs_block_destroy(sb);
	scoutfs_destroy_triggers(sb);
	scoutfs_fence_destroy(sb);
	scoutfs_options_destroy(sb);
	scoutfs_sysfs_destroy_attrs(sb, &sbi->mopts_ssa);
	debugfs_remove(sbi->debug_root);
	scoutfs_destroy_counters(sb);
	scoutfs_destroy_sysfs(sb);
	scoutfs_metadev_close(sb);

	kfree(sbi->opts.metadev_path);
	kfree(sbi);

	sb->s_fs_info = NULL;
}

/*
 * Record that we're performing a forced unmount. As put_super drives
 * destruction of the filesystem we won't issue more network or storage
 * operations because we assume that they'll hang. Pending operations
 * can return errors when it's possible to do so. We may be racing with
 * pending operations which can't be canceled.
 */
static void scoutfs_umount_begin(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	scoutfs_warn(sb, "forcing unmount, can return errors and lose unsynced data");
	sbi->forced_unmount = true;

	scoutfs_client_net_shutdown(sb);
}

static const struct super_operations scoutfs_super_ops = {
	.alloc_inode = scoutfs_alloc_inode,
	.drop_inode = scoutfs_drop_inode,
@@ -258,9 +285,8 @@ static const struct super_operations scoutfs_super_ops = {
	.destroy_inode = scoutfs_destroy_inode,
	.sync_fs = scoutfs_sync_fs,
	.statfs = scoutfs_statfs,
	.show_options = scoutfs_options_show,
	.show_options = scoutfs_show_options,
	.put_super = scoutfs_put_super,
	.umount_begin = scoutfs_umount_begin,
};

/*
@@ -280,16 +306,28 @@ int scoutfs_write_super(struct super_block *sb,
			sizeof(struct scoutfs_super_block));
}

static bool small_bdev(struct super_block *sb, char *which, u64 blocks,
		       struct block_device *bdev, int shift)
static bool invalid_blkno_limits(struct super_block *sb, char *which,
				 u64 start, __le64 first, __le64 last,
				 struct block_device *bdev, int shift)
{
	u64 size = (u64)i_size_read(bdev->bd_inode);
	u64 count = size >> shift;
	u64 blkno;

	if (blocks > count) {
		scoutfs_err(sb, "super block records %llu %s blocks, but device %u:%u size %llu only allows %llu blocks",
			    blocks, which, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev), size, count);
	if (le64_to_cpu(first) < start) {
		scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
			    which, le64_to_cpu(first), start);
		return true;
	}

	if (le64_to_cpu(first) > le64_to_cpu(last)) {
		scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
			    which, le64_to_cpu(first), which, le64_to_cpu(last));
		return true;
	}

	blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
	if (le64_to_cpu(last) > blkno) {
		scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
			    which, le64_to_cpu(last), blkno);
		return true;
	}

@@ -338,32 +376,27 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
		goto out;
	}

	if (le64_to_cpu(super->fmt_vers) < SCOUTFS_FORMAT_VERSION_MIN ||
	    le64_to_cpu(super->fmt_vers) > SCOUTFS_FORMAT_VERSION_MAX) {
		scoutfs_err(sb, "super block has format version %llu outside of supported version range %u-%u",
			    le64_to_cpu(super->fmt_vers), SCOUTFS_FORMAT_VERSION_MIN,
			    SCOUTFS_FORMAT_VERSION_MAX);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * fill_supers checks the fmt_vers in both supers and then decides to use it.
	 * From then on we verify that the supers we read have that version.
	 */
	if (sbi->fmt_vers != 0 && le64_to_cpu(super->fmt_vers) != sbi->fmt_vers) {
		scoutfs_err(sb, "super block has format version %llu than %llu read at mount",
			    le64_to_cpu(super->fmt_vers), sbi->fmt_vers);
	if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
		scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
			    le64_to_cpu(super->version),
			    SCOUTFS_INTEROP_VERSION);
		ret = -EINVAL;
		goto out;
	}

	/* XXX do we want more rigorous invalid super checking? */

	if (small_bdev(sb, "metadata", le64_to_cpu(super->total_meta_blocks), sbi->meta_bdev,
		       SCOUTFS_BLOCK_LG_SHIFT) ||
	    small_bdev(sb, "data", le64_to_cpu(super->total_data_blocks), sb->s_bdev,
		       SCOUTFS_BLOCK_SM_SHIFT)) {
	if (invalid_blkno_limits(sb, "meta",
				 SCOUTFS_META_DEV_START_BLKNO,
				 super->first_meta_blkno,
				 super->last_meta_blkno, sbi->meta_bdev,
				 SCOUTFS_BLOCK_LG_SHIFT) ||
	    invalid_blkno_limits(sb, "data",
				 SCOUTFS_DATA_DEV_START_BLKNO,
				 super->first_data_blkno,
				 super->last_data_blkno, sb->s_bdev,
				 SCOUTFS_BLOCK_SM_SHIFT)) {
		ret = -EINVAL;
	}

@@ -470,14 +503,7 @@ static int scoutfs_read_supers(struct super_block *sb)
		goto out;
	}

	if (le64_to_cpu(meta_super->fmt_vers) != le64_to_cpu(data_super->fmt_vers)) {
		scoutfs_err(sb, "meta device format version %llu != data device format version %llu",
			    le64_to_cpu(meta_super->fmt_vers), le64_to_cpu(data_super->fmt_vers));
		goto out;
	}

	sbi->fsid = le64_to_cpu(meta_super->hdr.fsid);
	sbi->fmt_vers = le64_to_cpu(meta_super->fmt_vers);
	sbi->super = *meta_super;
out:
	kfree(meta_super);
	kfree(data_super);
@@ -486,13 +512,9 @@ out:

static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct scoutfs_mount_options opts;
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	struct file *meta_bdev_file;
#else
	struct block_device *meta_bdev;
#endif
	struct scoutfs_sb_info *sbi;
	struct mount_options opts;
	struct block_device *meta_bdev;
	struct inode *inode;
	int ret;

@@ -501,34 +523,35 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	sb->s_magic = SCOUTFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &scoutfs_super_ops;
	sb->s_d_op = &scoutfs_dentry_ops;
	sb->s_export_op = &scoutfs_export_ops;
	sb->s_xattr = scoutfs_xattr_handlers;
	sb->s_flags |= SB_I_VERSION | SB_POSIXACL;
	sb->s_time_gran = 1;

	/* btree blocks use long lived bh->b_data refs */
	mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);

	sbi = kzalloc(sizeof(struct scoutfs_sb_info), GFP_KERNEL);
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	if (!sbi)
		return -ENOMEM;
	sbi->sb = sb;

	ret = assign_random_id(sbi);
	if (ret < 0)
		goto out;
		return ret;

	spin_lock_init(&sbi->next_ino_lock);
	init_waitqueue_head(&sbi->trans_hold_wq);
	spin_lock_init(&sbi->data_wait_root.lock);
	sbi->data_wait_root.root = RB_ROOT;
	spin_lock_init(&sbi->trans_write_lock);
	INIT_DELAYED_WORK(&sbi->trans_write_work, scoutfs_trans_write_func);
	init_waitqueue_head(&sbi->trans_write_wq);
	scoutfs_sysfs_init_attrs(sb, &sbi->mopts_ssa);

	/* parse options early for use during setup */
	ret = scoutfs_options_early_setup(sb, data);
	if (ret < 0)
	ret = scoutfs_parse_options(sb, data, &opts);
	if (ret)
		goto out;
	scoutfs_options_read(sb, &opts);

	sbi->opts = opts;

	ret = sb_set_blocksize(sb, SCOUTFS_BLOCK_SM_SIZE);
	if (ret != SCOUTFS_BLOCK_SM_SIZE) {
@@ -537,27 +560,9 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}

#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	/*
	 * pass sbi as holder, since dev_mount already passes sb, which triggers a
	 * WARN_ON because dev_mount also passes non-NULL hops. By passing sbi
	 * here we just get a simple error in our test cases.
	 */
	meta_bdev_file = bdev_file_open_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sbi, NULL);
	if (IS_ERR(meta_bdev_file)) {
		scoutfs_err(sb, "could not open metadev: error %ld",
			    PTR_ERR(meta_bdev_file));
		ret = PTR_ERR(meta_bdev_file);
		goto out;
	}
	sbi->meta_bdev_file = meta_bdev_file;
	sbi->meta_bdev = file_bdev(meta_bdev_file);
#else
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
	meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb, NULL);
#else
	meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb);
#endif
	meta_bdev =
		blkdev_get_by_path(sbi->opts.metadev_path,
				   SCOUTFS_META_BDEV_MODE, sb);
	if (IS_ERR(meta_bdev)) {
		scoutfs_err(sb, "could not open metadev: error %ld",
			    PTR_ERR(meta_bdev));
@@ -565,8 +570,6 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}
	sbi->meta_bdev = meta_bdev;
#endif

	ret = set_blocksize(sbi->meta_bdev, SCOUTFS_BLOCK_SM_SIZE);
	if (ret != 0) {
		scoutfs_err(sb, "failed to set metadev blocksize, returned %d",
@@ -579,34 +582,30 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	       scoutfs_setup_sysfs(sb) ?:
	       scoutfs_setup_counters(sb) ?:
	       scoutfs_options_setup(sb) ?:
	       scoutfs_sysfs_create_attrs(sb, &sbi->mopts_ssa,
					  mount_options_attrs, "mount_options") ?:
	       scoutfs_setup_triggers(sb) ?:
	       scoutfs_fence_setup(sb) ?:
	       scoutfs_block_setup(sb) ?:
	       scoutfs_forest_setup(sb) ?:
	       scoutfs_item_setup(sb) ?:
	       scoutfs_wkic_setup(sb) ?:
	       scoutfs_inode_setup(sb) ?:
	       scoutfs_quota_setup(sb) ?:
	       scoutfs_data_setup(sb) ?:
	       scoutfs_setup_trans(sb) ?:
	       scoutfs_omap_setup(sb) ?:
	       scoutfs_lock_setup(sb) ?:
	       scoutfs_net_setup(sb) ?:
	       scoutfs_recov_setup(sb) ?:
	       scoutfs_server_setup(sb) ?:
	       scoutfs_quorum_setup(sb) ?:
	       scoutfs_client_setup(sb) ?:
	       scoutfs_volopt_setup(sb) ?:
	       scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
				&sbi->rid_lock) ?:
	       scoutfs_trans_get_log_trees(sb) ?:
	       scoutfs_srch_setup(sb);
	if (ret)
		goto out;
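
The long setup chain above leans on the GNU C conditional with an
omitted middle operand: a ?: b evaluates to a when a is nonzero, so the
first setup function to return an error short-circuits the rest. One
link expands to the equivalent (illustration):

	ret = scoutfs_block_setup(sb);
	if (ret == 0)
		ret = scoutfs_forest_setup(sb);
	/* ... and so on; the first failing errno falls through to ret */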
	/* this interruptible iget lets hung mount be aborted with ctl-c */
	inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO, SCOUTFS_LKF_INTERRUPTIBLE, 0);
	inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		goto out;
	}

@@ -616,15 +615,12 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}

	/* send requests once iget progress shows we had a server */
	ret = scoutfs_trans_get_log_trees(sb);
	ret = scoutfs_client_advance_seq(sb, &sbi->trans_seq);
	if (ret)
		goto out;

	/* start up background services that use everything else */
	scoutfs_inode_start(sb);
	scoutfs_forest_start(sb);
	scoutfs_trans_restart_sync_deadline(sb);
//	scoutfs_scan_orphans(sb);
	ret = 0;
out:
	/* on error, generic_shutdown_super calls put_super if s_root */
@@ -645,18 +641,7 @@ static struct dentry *scoutfs_mount(struct file_system_type *fs_type, int flags,
 */
static void scoutfs_kill_sb(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (sbi) {
		sbi->unmounting = true;
		smp_wmb();
	}

	if (SCOUTFS_HAS_SBI(sb)) {
		scoutfs_options_stop(sb);
		scoutfs_inode_orphan_stop(sb);
		scoutfs_lock_unmount_begin(sb);
	}
	trace_scoutfs_kill_sb(sb);

	kill_block_super(sb);
}
@@ -674,6 +659,7 @@ MODULE_ALIAS_FS("scoutfs");
static void teardown_module(void)
{
	debugfs_remove(scoutfs_debugfs_root);
	scoutfs_dir_exit();
	scoutfs_inode_exit();
	scoutfs_sysfs_exit();
}
@@ -688,15 +674,11 @@ static int __init scoutfs_module_init(void)
 */
	__asm__ __volatile__ (
		".section .note.git_describe,\"a\"\n"
		".ascii \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
		".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
		".previous\n");
	__asm__ __volatile__ (
		".section .note.scoutfs_format_version_min,\"a\"\n"
		".ascii \""SCOUTFS_FORMAT_VERSION_MIN_STR"\\n\"\n"
		".previous\n");
	__asm__ __volatile__ (
		".section .note.scoutfs_format_version_max,\"a\"\n"
		".ascii \""SCOUTFS_FORMAT_VERSION_MAX_STR"\\n\"\n"
		".section .note.scoutfs_interop_version,\"a\"\n"
		".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
		".previous\n");

	scoutfs_init_counters();
@@ -711,23 +693,23 @@ static int __init scoutfs_module_init(void)
		goto out;
	}
	ret = scoutfs_inode_init() ?:
	      scoutfs_dir_init() ?:
	      register_filesystem(&scoutfs_fs_type);
out:
	if (ret)
		teardown_module();
	return ret;
}
module_init(scoutfs_module_init);
module_init(scoutfs_module_init)

static void __exit scoutfs_module_exit(void)
{
	unregister_filesystem(&scoutfs_fs_type);
	teardown_module();
}
module_exit(scoutfs_module_exit);
module_exit(scoutfs_module_exit)

MODULE_AUTHOR("Zach Brown <zab@versity.com>");
MODULE_LICENSE("GPL");
MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
MODULE_INFO(scoutfs_format_version_min, SCOUTFS_FORMAT_VERSION_MIN_STR);
MODULE_INFO(scoutfs_format_version_max, SCOUTFS_FORMAT_VERSION_MAX_STR);
MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);
@@ -26,29 +26,20 @@ struct net_info;
struct block_info;
struct forest_info;
struct srch_info;
struct recov_info;
struct omap_info;
struct volopt_info;
struct fence_info;
struct wkic_info;
struct squota_info;

struct scoutfs_sb_info {
	struct super_block *sb;

	/* assigned once at the start of each mount, read-only */
	u64 fsid;
	u64 rid;
	u64 fmt_vers;
	struct scoutfs_lock *rid_lock;

	struct scoutfs_super_block super;

	struct block_device *meta_bdev;
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	struct file *meta_bdev_file;
#endif

	spinlock_t next_ino_lock;

	struct options_info *options_info;
	struct data_info *data_info;
	struct inode_sb_info *inode_sb_info;
	struct btree_info *btree_info;
@@ -57,34 +48,40 @@ struct scoutfs_sb_info {
	struct block_info *block_info;
	struct forest_info *forest_info;
	struct srch_info *srch_info;
	struct omap_info *omap_info;
	struct volopt_info *volopt_info;
	struct item_cache_info *item_cache_info;
	struct wkic_info *wkic_info;
	struct squota_info *squota_info;
	struct fence_info *fence_info;

	wait_queue_head_t trans_hold_wq;
	struct task_struct *trans_task;

	/* tracks tasks waiting for data extents */
	struct scoutfs_data_wait_root data_wait_root;

	/* set as transaction opens with trans holders excluded */
	spinlock_t trans_write_lock;
	u64 trans_write_count;
	u64 trans_seq;
	int trans_write_ret;
	struct delayed_work trans_write_work;
	wait_queue_head_t trans_write_wq;
	struct workqueue_struct *trans_write_workq;
	bool trans_deadline_expired;

	struct trans_info *trans_info;
	struct lock_info *lock_info;
	struct lock_server_info *lock_server_info;
	struct client_info *client_info;
	struct server_info *server_info;
	struct recov_info *recov_info;
	struct sysfs_info *sfsinfo;

	struct scoutfs_counters *counters;
	struct scoutfs_triggers *triggers;

	struct mount_options opts;
	struct options_sb_info *options;
	struct scoutfs_sysfs_attrs mopts_ssa;

	struct dentry *debug_root;

	bool forced_unmount;
	bool unmounting;
	bool shutdown;

	unsigned long corruption_messages_once[SC_NR_LONGS];
};
@@ -104,31 +101,7 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)
	return !!(le64_to_cpu(super_block->flags) & SCOUTFS_FLAG_IS_META_BDEV);
}

#ifdef KC_HAVE_BLK_MODE_T
#define SCOUTFS_META_BDEV_MODE (BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL)
#else
#define SCOUTFS_META_BDEV_MODE (FMODE_READ | FMODE_WRITE | FMODE_EXCL)
#endif

static inline bool scoutfs_forcing_unmount(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	return sbi->forced_unmount;
}

/*
 * True if we're shutting down the system and can be used as a coarse
 * indicator that we can avoid doing some work that no longer makes
 * sense.
 */
static inline bool scoutfs_unmounting(struct super_block *sb)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	smp_rmb();
	return !sbi || sbi->unmounting;
}

/*
 * A small string embedded in messages that's used to identify a
@@ -145,14 +118,14 @@ static inline bool scoutfs_unmounting(struct super_block *sb)
	(int)(le64_to_cpu(fsid) >> SCSB_SHIFT), \
	(int)(le64_to_cpu(rid) >> SCSB_SHIFT)
#define SCSB_ARGS(sb) \
	(int)(SCOUTFS_SB(sb)->fsid >> SCSB_SHIFT), \
	(int)(le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) >> SCSB_SHIFT), \
	(int)(SCOUTFS_SB(sb)->rid >> SCSB_SHIFT)
#define SCSB_TRACE_FIELDS \
	__field(__u64, fsid) \
	__field(__u64, rid)
#define SCSB_TRACE_ASSIGN(sb) \
	__entry->fsid = SCOUTFS_HAS_SBI(sb) ? \
			SCOUTFS_SB(sb)->fsid : 0; \
			le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) : 0;\
	__entry->rid = SCOUTFS_HAS_SBI(sb) ? \
			SCOUTFS_SB(sb)->rid : 0;
#define SCSB_TRACE_ARGS \
@@ -167,17 +140,6 @@ int scoutfs_write_super(struct super_block *sb,
/* to keep this out of the ioctl.h public interface definition */
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

/*
 * Returns 0 when supported, non-zero -errno when unsupported.
 */
static inline int scoutfs_fmt_vers_unsupported(struct super_block *sb, u64 vers)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (sbi && (sbi->fmt_vers < vers))
		return -EOPNOTSUPP;
	else
		return 0;
}
__le64 scoutfs_clock_sync_id(void);

#endif
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "super.h"
#include "sysfs.h"
@@ -38,32 +37,14 @@ struct attr_funcs {
#define ATTR_FUNCS_RO(_name) \
        static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)

static ssize_t data_device_maj_min_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);

        return snprintf(buf, PAGE_SIZE, "%u:%u\n",
                        MAJOR(sb->s_bdev->bd_dev), MINOR(sb->s_bdev->bd_dev));
}
ATTR_FUNCS_RO(data_device_maj_min);

static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
                                   char *buf)
{
        struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

        return snprintf(buf, PAGE_SIZE, "%llu\n", sbi->fmt_vers);
}
ATTR_FUNCS_RO(format_version);

static ssize_t fsid_show(struct kobject *kobj, struct attribute *attr,
                         char *buf)
{
        struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;

        return snprintf(buf, PAGE_SIZE, "%016llx\n", sbi->fsid);
        return snprintf(buf, PAGE_SIZE, "%016llx\n",
                        le64_to_cpu(super->hdr.fsid));
}
ATTR_FUNCS_RO(fsid);

@@ -110,8 +91,6 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,

static struct attribute *sb_id_attrs[] = {
        &data_device_maj_min_attr_funcs.attr,
        &format_version_attr_funcs.attr,
        &fsid_attr_funcs.attr,
        &rid_attr_funcs.attr,
        NULL,
@@ -152,10 +131,9 @@ void scoutfs_sysfs_init_attrs(struct super_block *sb,
 * If this returns success then the file will be visible and show can
 * be called until unmount.
 */
int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
                                      struct kobject *parent,
                                      struct scoutfs_sysfs_attrs *ssa,
                                      struct attribute **attrs, char *fmt, ...)
int scoutfs_sysfs_create_attrs(struct super_block *sb,
                               struct scoutfs_sysfs_attrs *ssa,
                               struct attribute **attrs, char *fmt, ...)
{
        va_list args;
        size_t name_len;
@@ -196,8 +174,8 @@ int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
                goto out;
        }

        ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype, parent,
                                   "%s", ssa->name);
        ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype,
                                   scoutfs_sysfs_sb_dir(sb), "%s", ssa->name);
out:
        if (ret) {
                kfree(ssa->name);
@@ -268,7 +246,7 @@ int __init scoutfs_sysfs_init(void)
        return 0;
}

void scoutfs_sysfs_exit(void)
void __exit scoutfs_sysfs_exit(void)
{
        if (scoutfs_kset)
                kset_unregister(scoutfs_kset);

@@ -10,8 +10,6 @@

#define SCOUTFS_ATTR_RO(_name) \
        static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RO(_name)
#define SCOUTFS_ATTR_RW(_name) \
        static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RW(_name)

#define SCOUTFS_ATTR_PTR(_name) \
        &scoutfs_attr_##_name.attr
@@ -36,14 +34,9 @@ struct scoutfs_sysfs_attrs {

void scoutfs_sysfs_init_attrs(struct super_block *sb,
                              struct scoutfs_sysfs_attrs *ssa);
int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
                                      struct kobject *parent,
                                      struct scoutfs_sysfs_attrs *ssa,
                                      struct attribute **attrs, char *fmt, ...);
#define scoutfs_sysfs_create_attrs(sb, ssa, attrs, fmt, args...) \
        scoutfs_sysfs_create_attrs_parent(sb, scoutfs_sysfs_sb_dir(sb), \
                                          ssa, attrs, fmt, ##args)

int scoutfs_sysfs_create_attrs(struct super_block *sb,
                               struct scoutfs_sysfs_attrs *ssa,
                               struct attribute **attrs, char *fmt, ...);
void scoutfs_sysfs_destroy_attrs(struct super_block *sb,
                                 struct scoutfs_sysfs_attrs *ssa);

@@ -53,6 +46,6 @@ int scoutfs_setup_sysfs(struct super_block *sb);
void scoutfs_destroy_sysfs(struct super_block *sb);

int __init scoutfs_sysfs_init(void);
void scoutfs_sysfs_exit(void);
void __exit scoutfs_sysfs_exit(void);

#endif

@@ -1,90 +0,0 @@
/*
 * Copyright (C) 2023 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/string.h>

#include "format.h"
#include "forest.h"
#include "totl.h"

void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end)
{
        scoutfs_key_set_zeros(start);
        start->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
        scoutfs_key_set_ones(end);
        end->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
}

void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg)
{
        memset(merg, 0, sizeof(struct scoutfs_totl_merging));
}

void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
                                   u64 seq, u8 flags, void *val, int val_len, int fic)
{
        struct scoutfs_xattr_totl_val *tval = val;

        if (fic & FIC_FS_ROOT) {
                merg->fs_seq = seq;
                merg->fs_total = le64_to_cpu(tval->total);
                merg->fs_count = le64_to_cpu(tval->count);
        } else if (fic & FIC_FINALIZED) {
                merg->fin_seq = seq;
                merg->fin_total += le64_to_cpu(tval->total);
                merg->fin_count += le64_to_cpu(tval->count);
        } else {
                merg->log_seq = seq;
                merg->log_total += le64_to_cpu(tval->total);
                merg->log_count += le64_to_cpu(tval->count);
        }
}

/*
 * .totl. item merging has to be careful because the log btree merging
 * code can write partial results to the fs_root. This means that a
 * reader can see both cases where new finalized logs should be applied
 * to the old fs items and where old finalized logs have already been
 * applied to the partially merged fs items. Currently active logged
 * items are always applied on top of all cases.
 *
 * These cases are differentiated with a combination of sequence numbers
 * in items, the count of contributing xattrs, and a flag
 * differentiating finalized and active logged items. This lets us
 * recognize all cases, including when finalized logs were merged and
 * deleted the fs item.
 */
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count)
{
        *total = 0;
        *count = 0;

        /* start with the fs item if we have it */
        if (merg->fs_seq != 0) {
                *total = merg->fs_total;
                *count = merg->fs_count;
        }

        /* apply finalized logs if they're newer or creating */
        if (((merg->fs_seq != 0) && (merg->fin_seq > merg->fs_seq)) ||
            ((merg->fs_seq == 0) && (merg->fin_count > 0))) {
                *total += merg->fin_total;
                *count += merg->fin_count;
        }

        /* always apply active logs which must be newer than fs and finalized */
        if (merg->log_seq > 0) {
                *total += merg->log_total;
                *count += merg->log_count;
        }
}
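The merge above splits contributions into three buckets and resolves them in a fixed order: the fs item first, finalized logs when they're newer or creating, active logs always. A minimal caller sketch, assuming the seq/flags/val/fic arguments come from a forest item iteration (the item_* names are illustrative, the scoutfs_totl_* functions are the ones shown above):

    struct scoutfs_totl_merging merg;
    __u64 total;
    __u64 count;

    scoutfs_totl_merge_init(&merg);

    /* feed each version of the .totl. item that the iteration finds */
    scoutfs_totl_merge_contribute(&merg, item_seq, item_flags,
                                  item_val, item_val_len, item_fic);

    /* collapse fs, finalized, and active logged contributions */
    scoutfs_totl_merge_resolve(&merg, &total, &count);
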
@@ -1,24 +0,0 @@
#ifndef _SCOUTFS_TOTL_H_
#define _SCOUTFS_TOTL_H_

#include "key.h"

struct scoutfs_totl_merging {
        u64 fs_seq;
        u64 fs_total;
        u64 fs_count;
        u64 fin_seq;
        u64 fin_total;
        s64 fin_count;
        u64 log_seq;
        u64 log_total;
        s64 log_count;
};

void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg);
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
                                   u64 seq, u8 flags, void *val, int val_len, int fic);
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count);

#endif
@@ -1,143 +0,0 @@

/*
 * Tracing squota_input
 */
#define SQI_FMT "[%u %llu %llu %llu]"

#define SQI_ARGS(i) \
        (i)->op, (i)->attrs[0], (i)->attrs[1], (i)->attrs[2]

#define SQI_FIELDS(pref) \
        __array(__u64, pref##_attrs, SQ_NS__NR_SELECT) \
        __field(__u8, pref##_op)

#define SQI_ASSIGN(pref, i) \
        __entry->pref##_attrs[0] = (i)->attrs[0]; \
        __entry->pref##_attrs[1] = (i)->attrs[1]; \
        __entry->pref##_attrs[2] = (i)->attrs[2]; \
        __entry->pref##_op = (i)->op;

#define SQI_ENTRY_ARGS(pref) \
        __entry->pref##_op, __entry->pref##_attrs[0], \
        __entry->pref##_attrs[1], __entry->pref##_attrs[2]

/*
 * Tracing squota_rule
 */
#define SQR_FMT "[%u %llu,%u,%x %llu,%u,%x %llu,%u,%x %u %llu]"

#define SQR_ARGS(r) \
        (r)->prio, \
        (r)->name_val[0], (r)->name_source[0], (r)->name_flags[0], \
        (r)->name_val[1], (r)->name_source[1], (r)->name_flags[1], \
        (r)->name_val[2], (r)->name_source[2], (r)->name_flags[2], \
        (r)->op, (r)->limit \

#define SQR_FIELDS(pref) \
        __array(__u64, pref##_name_val, 3) \
        __field(__u64, pref##_limit) \
        __array(__u8, pref##_name_source, 3) \
        __array(__u8, pref##_name_flags, 3) \
        __field(__u8, pref##_prio) \
        __field(__u8, pref##_op)

#define SQR_ASSIGN(pref, r) \
        __entry->pref##_name_val[0] = (r)->names[0].val; \
        __entry->pref##_name_val[1] = (r)->names[1].val; \
        __entry->pref##_name_val[2] = (r)->names[2].val; \
        __entry->pref##_limit = (r)->limit; \
        __entry->pref##_name_source[0] = (r)->names[0].source; \
        __entry->pref##_name_source[1] = (r)->names[1].source; \
        __entry->pref##_name_source[2] = (r)->names[2].source; \
        __entry->pref##_name_flags[0] = (r)->names[0].flags; \
        __entry->pref##_name_flags[1] = (r)->names[1].flags; \
        __entry->pref##_name_flags[2] = (r)->names[2].flags; \
        __entry->pref##_prio = (r)->prio; \
        __entry->pref##_op = (r)->op;

#define SQR_ENTRY_ARGS(pref) \
        __entry->pref##_prio, __entry->pref##_name_val[0], \
        __entry->pref##_name_source[0], __entry->pref##_name_flags[0], \
        __entry->pref##_name_val[1], __entry->pref##_name_source[1], \
        __entry->pref##_name_flags[1], __entry->pref##_name_val[2], \
        __entry->pref##_name_source[2], __entry->pref##_name_flags[2], \
        __entry->pref##_op, __entry->pref##_limit

TRACE_EVENT(scoutfs_quota_check,
        TP_PROTO(struct super_block *sb, long rs_ptr, struct squota_input *inp, int ret),

        TP_ARGS(sb, rs_ptr, inp, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(long, rs_ptr)
                SQI_FIELDS(i)
                __field(int, ret)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->rs_ptr = rs_ptr;
                SQI_ASSIGN(i, inp);
                __entry->ret = ret;
        ),

        TP_printk(SCSBF" rs_ptr %ld ret %d inp "SQI_FMT,
                  SCSB_TRACE_ARGS, __entry->rs_ptr, __entry->ret, SQI_ENTRY_ARGS(i))
);

DECLARE_EVENT_CLASS(scoutfs_quota_rule_op_class,
        TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),

        TP_ARGS(sb, rule, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                SQR_FIELDS(r)
                __field(int, ret)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                SQR_ASSIGN(r, rule);
                __entry->ret = ret;
        ),

        TP_printk(SCSBF" "SQR_FMT" ret %d",
                  SCSB_TRACE_ARGS, SQR_ENTRY_ARGS(r), __entry->ret)
);
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_add_rule,
        TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
        TP_ARGS(sb, rule, ret)
);
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_del_rule,
        TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
        TP_ARGS(sb, rule, ret)
);

TRACE_EVENT(scoutfs_quota_totl_check,
        TP_PROTO(struct super_block *sb, struct squota_input *inp, struct scoutfs_key *key,
                 u64 limit, int ret),

        TP_ARGS(sb, inp, key, limit, ret),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                SQI_FIELDS(i)
                sk_trace_define(k)
                __field(__u64, limit)
                __field(int, ret)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                SQI_ASSIGN(i, inp);
                sk_trace_assign(k, key);
                __entry->limit = limit;
                __entry->ret = ret;
        ),

        TP_printk(SCSBF" inp "SQI_FMT" key "SK_FMT" limit %llu ret %d",
                  SCSB_TRACE_ARGS, SQI_ENTRY_ARGS(i), sk_trace_args(k), __entry->limit,
                  __entry->ret)
);
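The SQI_* and SQR_* helpers above exist so one definition can emit trace fields under different prefixes, which is what lets a single event carry both an input and a rule without name collisions. The mechanism is plain ## token pasting; a stripped-down sketch of the same idiom with hypothetical names:

    /* expands to one set of members per prefix */
    #define PAIR_FIELDS(pref) \
            __u64 pref##_total; \
            __u8 pref##_op;

    struct two_prefixes {
            PAIR_FIELDS(a)          /* a_total, a_op */
            PAIR_FIELDS(b)          /* b_total, b_op */
    };
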
@@ -1,112 +0,0 @@

DECLARE_EVENT_CLASS(scoutfs_wkic_wpage_class,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),

        TP_ARGS(sb, ptr, which, n0l, n1l, start, end),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                __field(void *, ptr)
                __field(int, which)
                __field(bool, n0l)
                __field(bool, n1l)
                sk_trace_define(start)
                sk_trace_define(end)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                __entry->ptr = ptr;
                __entry->which = which;
                __entry->n0l = n0l;
                __entry->n1l = n1l;
                sk_trace_assign(start, start);
                sk_trace_assign(end, end);
        ),

        TP_printk(SCSBF" ptr %p wh %d nl %u,%u start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
                  __entry->ptr, __entry->which, __entry->n0l, __entry->n1l,
                  sk_trace_args(start), sk_trace_args(end))
);

DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_alloced,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_freeing,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_found,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_trimmed,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_erased,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserting,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserted,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_shrinking,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_dropping,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_replaying,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_filled,
        TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
                 struct scoutfs_key *start, struct scoutfs_key *end),
        TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);

TRACE_EVENT(scoutfs_wkic_read_items,
        TP_PROTO(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *start,
                 struct scoutfs_key *end),

        TP_ARGS(sb, key, start, end),

        TP_STRUCT__entry(
                SCSB_TRACE_FIELDS
                sk_trace_define(key)
                sk_trace_define(start)
                sk_trace_define(end)
        ),

        TP_fast_assign(
                SCSB_TRACE_ASSIGN(sb);
                sk_trace_assign(key, key);
                sk_trace_assign(start, start);
                sk_trace_assign(end, end);
        ),

        TP_printk(SCSBF" key "SK_FMT" start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
                  sk_trace_args(key), sk_trace_args(start), sk_trace_args(end))
);
kmod/src/trans.c (369 lines changed)
@@ -17,7 +17,6 @@
#include <linux/atomic.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "super.h"
#include "trans.h"
@@ -54,24 +53,15 @@
/* sync dirty data at least this often */
#define TRANS_SYNC_DELAY (HZ * 10)

/*
 * XXX move the rest of the super trans_ fields here.
 */
struct trans_info {
        struct super_block *sb;

        atomic_t holders;

        struct scoutfs_log_trees lt;
        struct scoutfs_alloc alloc;
        struct scoutfs_block_writer wri;

        wait_queue_head_t hold_wq;
        struct task_struct *task;
        spinlock_t write_lock;
        u64 write_count;
        int write_ret;
        struct delayed_work write_work;
        wait_queue_head_t write_wq;
        struct workqueue_struct *write_workq;
        bool deadline_expired;
};

#define DECLARE_TRANS_INFO(sb, name) \
@@ -101,7 +91,6 @@ static int commit_btrees(struct super_block *sb)
 */
int scoutfs_trans_get_log_trees(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        DECLARE_TRANS_INFO(sb, tri);
        struct scoutfs_log_trees lt;
        int ret = 0;
@@ -114,11 +103,6 @@ int scoutfs_trans_get_log_trees(struct super_block *sb)

        scoutfs_forest_init_btrees(sb, &tri->alloc, &tri->wri, &lt);
        scoutfs_data_init_btrees(sb, &tri->alloc, &tri->wri, &lt);

                /* first set during mount from 0 to nonzero allows commits */
                spin_lock(&tri->write_lock);
                sbi->trans_seq = le64_to_cpu(lt.get_trans_seq);
                spin_unlock(&tri->write_lock);
        }
        return ret;
}
@@ -136,12 +120,13 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
 */
static void sub_holders_and_wake(struct super_block *sb, int val)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        DECLARE_TRANS_INFO(sb, tri);

        atomic_sub(val, &tri->holders);
        smp_mb(); /* make sure sub is visible before we wake */
        if (waitqueue_active(&tri->hold_wq))
                wake_up(&tri->hold_wq);
        if (waitqueue_active(&sbi->trans_hold_wq))
                wake_up(&sbi->trans_hold_wq);
}

/*
@@ -159,58 +144,6 @@ static bool drained_holders(struct trans_info *tri)
        return holders == 0;
}

static int commit_current_log_trees(struct super_block *sb, char **str)
{
        DECLARE_TRANS_INFO(sb, tri);

        return (*str = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
               (*str = "item dirty", scoutfs_item_write_dirty(sb)) ?:
               (*str = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
               (*str = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri)) ?:
               (*str = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
               (*str = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
               (*str = "commit log trees", commit_btrees(sb)) ?:
               scoutfs_item_write_done(sb);
}

static int get_next_log_trees(struct super_block *sb, char **str)
{
        return (*str = "get log trees", scoutfs_trans_get_log_trees(sb));
}

static int retry_forever(struct super_block *sb, int (*func)(struct super_block *sb, char **str))
{
        bool retrying = false;
        char *str;
        int ret;

        do {
                str = NULL;

                ret = func(sb, &str);
                if (ret < 0) {
                        if (!retrying) {
                                scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
                                             str, ret);
                                retrying = true;
                        }

                        if (scoutfs_forcing_unmount(sb)) {
                                ret = -ENOLINK;
                                break;
                        }

                        msleep(2 * MSEC_PER_SEC);

                } else if (retrying) {
                        scoutfs_info(sb, "retried transaction commit succeeded");
                }

        } while (ret < 0);

        return ret;
}

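commit_current_log_trees() and retry_forever() above combine two C idioms: the comma operator records a human-readable stage name just before each call, and the ?: chain stops at the first call that returns a non-zero errno. In isolation, with hypothetical stage functions:

    static int pipeline(struct super_block *sb, char **str)
    {
            /* each step names itself before running; the chain
             * short-circuits on the first non-zero return */
            return (*str = "first step", first_step(sb)) ?:
                   (*str = "second step", second_step(sb));
    }

On failure the caller can log *str to say exactly which stage produced the errno, which is what the retry warning above does.
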
/*
 * This work func is responsible for writing out all the dirty blocks
 * that make up the current dirty transaction. It prevents writers from
@@ -221,63 +154,90 @@ static int retry_forever(struct super_block *sb, int (*func)(struct super_block
 * functions that would try to hold the transaction. We record the task
 * who's committing the transaction so that holding won't deadlock.
 *
 * Once we clear the write func bit in holders then waiting holders can
 * enter the transaction and continue modifying the transaction. Once
 * we start writing we consider the transaction done and won't exit,
 * clearing the write func bit, until get_log_trees has opened the next
 * transaction. The exception is forced unmount which is allowed to
 * generate errors and throw away data.
 * Any dirty block had to have allocated a new blkno which would have
 * created dirty allocator metadata blocks. We can avoid writing
 * entirely if we don't have any dirty metadata blocks. This is
 * important because we don't try to serialize this work during
 * unmount; we can execute as the vfs is shutting down, so we need to
 * decide that nothing is dirty without calling the vfs at all.
 *
 * This means that the only way fsync can return an error is if we're in
 * forced unmount.
 * We first try to sync the dirty inodes and write their dirty data blocks,
 * then we write all our dirty metadata blocks, and only when those succeed
 * do we write the new super that references all of these newly written blocks.
 *
 * If there are write errors then blocks are kept dirty in memory and will
 * be written again at the next sync.
 */
void scoutfs_trans_write_func(struct work_struct *work)
{
        struct trans_info *tri = container_of(work, struct trans_info, write_work.work);
        struct super_block *sb = tri->sb;
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct scoutfs_sb_info *sbi = container_of(work, struct scoutfs_sb_info,
                                                   trans_write_work.work);
        struct super_block *sb = sbi->sb;
        DECLARE_TRANS_INFO(sb, tri);
        u64 trans_seq = sbi->trans_seq;
        char *s = NULL;
        int ret = 0;

        tri->task = current;
        sbi->trans_task = current;

        /* mark that we're writing so holders wait for us to finish and clear our bit */
        atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);

        wait_event(tri->hold_wq, drained_holders(tri));
        wait_event(sbi->trans_hold_wq, drained_holders(tri));

        /* mount hasn't opened first transaction yet, still complete sync */
        if (sbi->trans_seq == 0) {
                ret = 0;
        trace_scoutfs_trans_write_func(sb,
                                       scoutfs_block_writer_dirty_bytes(sb, &tri->wri));

        if (!scoutfs_block_writer_has_dirty(sb, &tri->wri) &&
            !scoutfs_item_dirty_pages(sb)) {
                if (sbi->trans_deadline_expired) {
                        /*
                         * If we're not writing data then we only advance the
                         * seq at the sync deadline interval. This keeps idle
                         * mounts from pinning a seq and stopping readers of the
                         * seq indices but doesn't send a message for every sync
                         * syscall.
                         */
                        ret = scoutfs_client_advance_seq(sb, &trans_seq);
                        if (ret < 0)
                                s = "clean advance seq";
                }
                goto out;
        }

        if (scoutfs_forcing_unmount(sb)) {
                ret = -ENOLINK;
                goto out;
        }

        trace_scoutfs_trans_write_func(sb, scoutfs_block_writer_dirty_bytes(sb, &tri->wri),
                                       scoutfs_item_dirty_pages(sb));

        if (tri->deadline_expired)
        if (sbi->trans_deadline_expired)
                scoutfs_inc_counter(sb, trans_commit_timer);

        scoutfs_inc_counter(sb, trans_commit_written);

        /* retry {commit,get}_log_trees until they succeed, can only fail when forcing unmount */
        ret = retry_forever(sb, commit_current_log_trees) ?:
              retry_forever(sb, get_next_log_trees);
        /* XXX this all needs serious work for dealing with errors */
        ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
              (s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
              (s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
              (s = "alloc prepare", scoutfs_alloc_prepare_commit(sb,
                                                                 &tri->alloc, &tri->wri)) ?:
              (s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
              (s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
              (s = "commit log trees", commit_btrees(sb)) ?:
              scoutfs_item_write_done(sb) ?:
              (s = "advance seq", scoutfs_client_advance_seq(sb, &trans_seq)) ?:
              (s = "get log trees", scoutfs_trans_get_log_trees(sb));
out:
        spin_lock(&tri->write_lock);
        tri->write_count++;
        tri->write_ret = ret;
        spin_unlock(&tri->write_lock);
        wake_up(&tri->write_wq);
        if (ret < 0)
                scoutfs_err(sb, "critical transaction commit failure: %s, %d",
                            s, ret);

        spin_lock(&sbi->trans_write_lock);
        sbi->trans_write_count++;
        sbi->trans_write_ret = ret;
        sbi->trans_seq = trans_seq;
        spin_unlock(&sbi->trans_write_lock);
        wake_up(&sbi->trans_write_wq);

        /* we're done, wake waiting holders */
        sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);

        tri->task = NULL;
        sbi->trans_task = NULL;

        scoutfs_trans_restart_sync_deadline(sb);
}
@@ -288,17 +248,17 @@ struct write_attempt {
};

/* this is called as a wait_event() condition so it can't change task state */
static int write_attempted(struct super_block *sb, struct write_attempt *attempt)
static int write_attempted(struct scoutfs_sb_info *sbi,
                           struct write_attempt *attempt)
{
        DECLARE_TRANS_INFO(sb, tri);
        int done = 1;

        spin_lock(&tri->write_lock);
        if (tri->write_count > attempt->count)
                attempt->ret = tri->write_ret;
        spin_lock(&sbi->trans_write_lock);
        if (sbi->trans_write_count > attempt->count)
                attempt->ret = sbi->trans_write_ret;
        else
                done = 0;
        spin_unlock(&tri->write_lock);
        spin_unlock(&sbi->trans_write_lock);

        return done;
}

@@ -308,12 +268,10 @@ static int write_attempted(struct super_block *sb, struct write_attempt *attempt
 * We always have delayed sync work pending but the caller wants it
 * to execute immediately.
 */
static void queue_trans_work(struct super_block *sb)
static void queue_trans_work(struct scoutfs_sb_info *sbi)
{
        DECLARE_TRANS_INFO(sb, tri);

        tri->deadline_expired = false;
        mod_delayed_work(tri->write_workq, &tri->write_work, 0);
        sbi->trans_deadline_expired = false;
        mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work, 0);
}

/*
@@ -326,24 +284,26 @@ static void queue_trans_work(struct super_block *sb)
 */
int scoutfs_trans_sync(struct super_block *sb, int wait)
{
        DECLARE_TRANS_INFO(sb, tri);
        struct write_attempt attempt = { .ret = 0 };
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct write_attempt attempt;
        int ret;

        if (!wait) {
                queue_trans_work(sb);
                queue_trans_work(sbi);
                return 0;
        }

        spin_lock(&tri->write_lock);
        attempt.count = tri->write_count;
        spin_unlock(&tri->write_lock);
        spin_lock(&sbi->trans_write_lock);
        attempt.count = sbi->trans_write_count;
        spin_unlock(&sbi->trans_write_lock);

        queue_trans_work(sb);
        queue_trans_work(sbi);

        wait_event(tri->write_wq, write_attempted(sb, &attempt));
        ret = attempt.ret;
        ret = wait_event_interruptible(sbi->trans_write_wq,
                                       write_attempted(sbi, &attempt));
        if (ret == 0)
                ret = attempt.ret;

        return ret;
}
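scoutfs_trans_sync() and write_attempted() form the usual counter-snapshot handshake: sample the write count under the lock, kick the commit work, then wait for the count to pass the sample so that a commit already in flight when we sampled can't satisfy the wait. Reduced to its shape, with illustrative names:

    spin_lock(&lock);
    before = write_count;            /* snapshot under the lock */
    spin_unlock(&lock);

    kick_commit_work();

    /* only a commit that completed after the snapshot ends the wait */
    wait_event(wq, sampled_write_count() > before);
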
@@ -359,10 +319,10 @@ int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,

void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
{
        DECLARE_TRANS_INFO(sb, tri);
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

        tri->deadline_expired = true;
        mod_delayed_work(tri->write_workq, &tri->write_work,
        sbi->trans_deadline_expired = true;
        mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work,
                         TRANS_SYNC_DELAY);
}

@@ -470,8 +430,8 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
                return true;
        }

        /* if we're low and can't refill then alloc could empty and return enospc */
        if (scoutfs_data_alloc_should_refill(sb, SCOUTFS_ALLOC_DATA_REFILL_THRESH)) {
        /* Try to refill data allocator before premature enospc */
        if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
                scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
                return true;
        }
@@ -479,15 +439,38 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
        return false;
}

/*
 * called as a wait_event condition, needs to be careful to not change
 * task state and is racing with waking paths that sub_return, test, and
 * wake.
 */
static bool holders_no_writer(struct trans_info *tri)
static bool acquired_hold(struct super_block *sb)
{
        smp_mb(); /* make sure task in wait_event queue before atomic read */
        return !(atomic_read(&tri->holders) & TRANS_HOLDERS_WRITE_FUNC_BIT);
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        DECLARE_TRANS_INFO(sb, tri);
        bool acquired;

        /* if a caller already has a hold we acquire unconditionally */
        if (inc_journal_info_holders()) {
                atomic_inc(&tri->holders);
                acquired = true;
                goto out;
        }

        /* wait if the writer is blocking holds */
        if (!inc_holders_unless_writer(tri)) {
                dec_journal_info_holders();
                acquired = false;
                goto out;
        }

        /* wait if we're triggering another commit */
        if (commit_before_hold(sb, tri)) {
                release_holders(sb);
                queue_trans_work(sbi);
                acquired = false;
                goto out;
        }

        trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
        acquired = true;
out:
        return acquired;
}

/*
@@ -503,65 +486,15 @@ static bool holders_no_writer(struct trans_info *tri)
 * The writing thread marks itself as a global trans_task which
 * short-circuits all the hold machinery so it can call code that would
 * otherwise try to hold transactions while it is writing.
 *
 * If the caller is adding metadata items that will eventually consume
 * free space -- not dirtying existing items or adding deletion items --
 * then we can return enospc if our metadata allocator indicates that
 * we're low on space.
 */
int scoutfs_hold_trans(struct super_block *sb, bool allocing)
int scoutfs_hold_trans(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        DECLARE_TRANS_INFO(sb, tri);
        u64 seq;
        int ret;

        if (current == tri->task)
        if (current == sbi->trans_task)
                return 0;

        for (;;) {
                /* shouldn't get holders until mount finishes, (not locking for cheap test) */
                if (WARN_ON_ONCE(sbi->trans_seq == 0)) {
                        ret = -EINVAL;
                        break;
                }

                /* if a caller already has a hold we acquire unconditionally */
                if (inc_journal_info_holders()) {
                        atomic_inc(&tri->holders);
                        ret = 0;
                        break;
                }

                /* wait until the writer work is finished */
                if (!inc_holders_unless_writer(tri)) {
                        dec_journal_info_holders();
                        wait_event(tri->hold_wq, holders_no_writer(tri));
                        continue;
                }

                /* return enospc if server is into reserved blocks and we're allocating */
                if (allocing && scoutfs_alloc_test_flag(sb, &tri->alloc, SCOUTFS_ALLOC_FLAG_LOW)) {
                        release_holders(sb);
                        ret = -ENOSPC;
                        break;
                }

                /* see if we need to trigger and wait for a commit before holding */
                if (commit_before_hold(sb, tri)) {
                        seq = scoutfs_trans_sample_seq(sb);
                        release_holders(sb);
                        queue_trans_work(sb);
                        wait_event(tri->hold_wq, scoutfs_trans_sample_seq(sb) != seq);
                        continue;
                }

                ret = 0;
                break;
        }

        trace_scoutfs_hold_trans(sb, current->journal_info, atomic_read(&tri->holders), ret);
        return ret;
        return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
}
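The holders atomic doubles as a gate. The write func adds TRANS_HOLDERS_WRITE_FUNC_BIT so inc_holders_unless_writer() fails for new holders, and drained_holders() watches the low bits fall to zero before writing begins. The shape of the trick, as a sketch with an illustrative bit value:

    #define WRITER_BIT (1 << 30)

    atomic_add(WRITER_BIT, &holders);       /* block new holds */
    wait_event(wq, (atomic_read(&holders) & ~WRITER_BIT) == 0);

    /* ... write out the dirty transaction ... */

    atomic_sub(WRITER_BIT, &holders);       /* reopen the gate */
    wake_up(&wq);
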
/*
@@ -578,14 +511,15 @@ bool scoutfs_trans_held(void)

void scoutfs_release_trans(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        DECLARE_TRANS_INFO(sb, tri);

        if (current == tri->task)
        if (current == sbi->trans_task)
                return;

        release_holders(sb);

        trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders), 0);
        trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
}

/*
@@ -595,13 +529,12 @@ void scoutfs_release_trans(struct super_block *sb)
 */
u64 scoutfs_trans_sample_seq(struct super_block *sb)
{
        DECLARE_TRANS_INFO(sb, tri);
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        u64 ret;

        spin_lock(&tri->write_lock);
        spin_lock(&sbi->trans_write_lock);
        ret = sbi->trans_seq;
        spin_unlock(&tri->write_lock);
        spin_unlock(&sbi->trans_write_lock);

        return ret;
}
@@ -615,17 +548,12 @@ int scoutfs_setup_trans(struct super_block *sb)
        if (!tri)
                return -ENOMEM;

        tri->sb = sb;
        atomic_set(&tri->holders, 0);
        scoutfs_block_writer_init(sb, &tri->wri);

        spin_lock_init(&tri->write_lock);
        INIT_DELAYED_WORK(&tri->write_work, scoutfs_trans_write_func);
        init_waitqueue_head(&tri->write_wq);
        init_waitqueue_head(&tri->hold_wq);

        tri->write_workq = alloc_workqueue("scoutfs_trans", WQ_UNBOUND, 1);
        if (!tri->write_workq) {
        sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
                                                 WQ_UNBOUND, 1);
        if (!sbi->trans_write_workq) {
                kfree(tri);
                return -ENOMEM;
        }
@@ -636,15 +564,8 @@ int scoutfs_setup_trans(struct super_block *sb)
}

/*
 * While the vfs will have done an fs level sync before calling
 * put_super, we may have done work down in our level after all the fs
 * ops were done. An example is final inode deletion in iput, that's
 * done in generic_shutdown_super after the sync and before calling our
 * put_super.
 *
 * So we always try to write any remaining dirty transactions before
 * shutting down. Typically there won't be any dirty data and the
 * worker will just return.
 * kill_sb calls sync before getting here so we know that dirty data
 * should be in flight. We just have to wait for it to quiesce.
 */
void scoutfs_shutdown_trans(struct super_block *sb)
{
@@ -652,19 +573,13 @@ void scoutfs_shutdown_trans(struct super_block *sb)
        DECLARE_TRANS_INFO(sb, tri);

        if (tri) {
                if (tri->write_workq) {
                        /* immediately queues pending timer */
                        flush_delayed_work(&tri->write_work);
                        /* prevents re-arming if it has to wait */
                        cancel_delayed_work_sync(&tri->write_work);
                        destroy_workqueue(tri->write_workq);
                        /* trans work schedules after shutdown see null */
                        tri->write_workq = NULL;
                }

                scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri);
                scoutfs_block_writer_forget_all(sb, &tri->wri);

                if (sbi->trans_write_workq) {
                        cancel_delayed_work_sync(&sbi->trans_write_work);
                        destroy_workqueue(sbi->trans_write_workq);
                        /* trans work schedules after shutdown see null */
                        sbi->trans_write_workq = NULL;
                }
                kfree(tri);
                sbi->trans_info = NULL;
        }

@@ -1,13 +1,18 @@
#ifndef _SCOUTFS_TRANS_H_
#define _SCOUTFS_TRANS_H_

/* the server will attempt to fill data allocs for each trans */
#define SCOUTFS_TRANS_DATA_ALLOC_HWM (2ULL * 1024 * 1024 * 1024)
/* the client will force commits if data allocators get too low */
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)

void scoutfs_trans_write_func(struct work_struct *work);
int scoutfs_trans_sync(struct super_block *sb, int wait);
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
                       int datasync);
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);

int scoutfs_hold_trans(struct super_block *sb, bool allocing);
int scoutfs_hold_trans(struct super_block *sb);
bool scoutfs_trans_held(void);
void scoutfs_release_trans(struct super_block *sb);
u64 scoutfs_trans_sample_seq(struct super_block *sb);

@@ -18,7 +18,6 @@

#include "super.h"
#include "triggers.h"
#include "scoutfs_trace.h"

/*
 * We have debugfs files we can write to which arm triggers which
@@ -40,10 +39,6 @@ struct scoutfs_triggers {

static char *names[] = {
        [SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
        [SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS] = "log_merge_force_finalize_ours",
        [SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
        [SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
        [SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
        [SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
};

@@ -53,7 +48,6 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
        atomic_t *atom;
        int old;
        int mem;
        bool fired;

        BUG_ON(t >= SCOUTFS_TRIGGER_NR);
        atom = &triggers->atomics[t];
@@ -67,12 +61,7 @@ bool scoutfs_trigger_test_and_clear(struct super_block *sb, unsigned int t)
                mem = atomic_cmpxchg(atom, old, 0);
        } while (mem && mem != old);

        fired = !!mem;

        if (fired)
                trace_scoutfs_trigger_fired(sb, names[t]);

        return fired;
        return !!mem;
}
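The cmpxchg loop above claims whatever value a test wrote without losing a concurrent re-arm: it rereads the value and races to swap it to zero, retrying only if another writer changed it in between. From the outside, a test arms a trigger through its debugfs file and the kernel path consumes it at most once per arm (the debugfs path and the consuming function here are illustrative):

    /* a test harness arms the trigger via its debugfs file, e.g.
     *   echo 1 > /sys/kernel/debug/<scoutfs dir>/block_remove_stale
     * (path illustrative, created from names[] above) */
    if (scoutfs_trigger_test_and_clear(sb, SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE))
            inject_stale_block_error();     /* hypothetical consumer */
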
int scoutfs_setup_triggers(struct super_block *sb)
@@ -101,9 +90,13 @@ int scoutfs_setup_triggers(struct super_block *sb)
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++)
                debugfs_create_atomic_t(names[i], 0644, triggers->dir,
                                        &triggers->atomics[i]);
        for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++) {
                if (!debugfs_create_atomic_t(names[i], 0644, triggers->dir,
                                             &triggers->atomics[i])) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        ret = 0;
out:

@@ -3,10 +3,6 @@

enum scoutfs_trigger {
        SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
        SCOUTFS_TRIGGER_LOG_MERGE_FORCE_FINALIZE_OURS,
        SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
        SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
        SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
        SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
        SCOUTFS_TRIGGER_NR,
};

@@ -46,23 +46,6 @@ static struct scoutfs_tseq_entry *tseq_rb_next(struct scoutfs_tseq_entry *ent)
        return rb_entry(node, struct scoutfs_tseq_entry, node);
}

#ifdef KC_RB_TREE_AUGMENTED_COMPUTE_MAX
static bool tseq_compute_total(struct scoutfs_tseq_entry *ent, bool exit)
{
        loff_t total = 1 + tseq_node_total(ent->node.rb_left) +
                       tseq_node_total(ent->node.rb_right);

        if (exit && ent->total == total)
                return true;

        ent->total = total;
        return false;
}

RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
                     node, total, tseq_compute_total);
#else

static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
{
        return 1 + tseq_node_total(ent->node.rb_left) +
@@ -70,8 +53,7 @@ static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
}

RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
                     node, loff_t, total, tseq_compute_total);
#endif
                     node, loff_t, total, tseq_compute_total)

void scoutfs_tseq_tree_init(struct scoutfs_tseq_tree *tree,
                            scoutfs_tseq_show_t show)
@@ -183,13 +165,6 @@ static void *scoutfs_tseq_seq_next(struct seq_file *m, void *v, loff_t *pos)
        ent = tseq_rb_next(ent);
        if (ent)
                *pos = ent->pos;
        else
                /*
                 * once we hit the end, *pos is never used, but it has to
                 * be updated to avoid an error in bpf_seq_read()
                 */
                (*pos)++;

        return ent;
}
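tseq_compute_total() maintains an order-statistics augmentation: every node caches 1 plus its children's subtree totals, which RB_DECLARE_CALLBACKS keeps up to date across rotations. The payoff is a log-time seek to the Nth entry for seq_file positions. A sketch of that descent under the same field names; the actual seek code isn't shown in this hunk, so treat this as an assumed illustration:

    /* hypothetical positional lookup using the cached totals */
    static struct scoutfs_tseq_entry *tseq_seek(struct rb_node *node, loff_t pos)
    {
            while (node) {
                    loff_t left = tseq_node_total(node->rb_left);

                    if (pos < left) {
                            node = node->rb_left;
                    } else if (pos == left) {
                            return rb_entry(node, struct scoutfs_tseq_entry, node);
                    } else {
                            pos -= left + 1;  /* skip left subtree and this node */
                            node = node->rb_right;
                    }
            }
            return NULL;
    }
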
@@ -17,15 +17,4 @@ static inline void down_write_two(struct rw_semaphore *a,
        down_write_nested(b, SINGLE_DEPTH_NESTING);
}

/*
 * When returning shrinker counts from scan_objects, we should steer
 * clear of the magic SHRINK_STOP and SHRINK_EMPTY values, which are near
 * ~0UL values. Hence, we cap count to LONG_MAX, which is arbitrarily high
 * enough to avoid them.
 */
static inline long shrinker_min_long(long count)
{
        return min(count, LONG_MAX);
}

#endif

@@ -1,188 +0,0 @@
/*
 * Copyright (C) 2021 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "super.h"
#include "client.h"
#include "volopt.h"

/*
 * Volume options are exposed through a sysfs directory. Getting and
 * setting the values sends rpcs to the server that owns the options in
 * the super block.
 */

struct volopt_info {
        struct super_block *sb;
        struct scoutfs_sysfs_attrs ssa;
};

#define DECLARE_VOLOPT_INFO(sb, name) \
        struct volopt_info *name = SCOUTFS_SB(sb)->volopt_info
#define DECLARE_VOLOPT_INFO_KOBJ(kobj, name) \
        DECLARE_VOLOPT_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)

/*
 * attribute arrays need to be dense but the options we export could
 * well become sparse over time. .show and .store are generic and we
 * have a lookup table to map the attributes array indexes to the number
 * and name of the option.
 */
static struct volopt_nr_name {
        int nr;
        char *name;
} volopt_table[] = {
        { SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR, "data_alloc_zone_blocks" },
};

/* initialized by setup, pointer array is null terminated */
static struct kobj_attribute volopt_attrs[ARRAY_SIZE(volopt_table)];
static struct attribute *volopt_attr_ptrs[ARRAY_SIZE(volopt_table) + 1];

static void get_opt_data(struct kobj_attribute *attr, struct scoutfs_volume_options *volopt,
                         u64 *bit, __le64 **opt)
{
        size_t index = attr - &volopt_attrs[0];
        int nr = volopt_table[index].nr;

        *bit = 1ULL << nr;
        *opt = &volopt->set_bits + 1 + nr;
}

static ssize_t volopt_attr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
        struct super_block *sb = vinf->sb;
        struct scoutfs_volume_options volopt;
        __le64 *opt;
        u64 bit;
        int ret;

        ret = scoutfs_client_get_volopt(sb, &volopt);
        if (ret < 0)
                return ret;

        get_opt_data(attr, &volopt, &bit, &opt);

        if (le64_to_cpu(volopt.set_bits) & bit) {
                return snprintf(buf, PAGE_SIZE, "%llu", le64_to_cpup(opt));
        } else {
                buf[0] = '\0';
                return 0;
        }
}

static ssize_t volopt_attr_store(struct kobject *kobj, struct kobj_attribute *attr,
                                 const char *buf, size_t count)
{
        DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
        struct super_block *sb = vinf->sb;
        struct scoutfs_volume_options volopt = {0,};
        u8 chars[32];
        __le64 *opt;
        u64 bit;
        u64 val;
        int ret;

        if (count == 0)
                return 0;
        if (count > sizeof(chars) - 1)
                return -ERANGE;

        get_opt_data(attr, &volopt, &bit, &opt);

        if (buf[0] == '\n' || buf[0] == '\r') {
                volopt.set_bits = cpu_to_le64(bit);

                ret = scoutfs_client_clear_volopt(sb, &volopt);
        } else {
                memcpy(chars, buf, count);
                chars[count] = '\0';
                ret = kstrtoull(chars, 0, &val);
                if (ret < 0)
                        return ret;

                volopt.set_bits = cpu_to_le64(bit);
                *opt = cpu_to_le64(val);

                ret = scoutfs_client_set_volopt(sb, &volopt);
        }

        if (ret == 0)
                ret = count;
        return ret;
}

/*
 * The volume option sysfs files are slim shims around RPCs so this
 * should be called after the client is setup and before it is torn
 * down.
 */
int scoutfs_volopt_setup(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct volopt_info *vinf;
        int ret;
        int i;

        /* persistent volume options are always a bitmap u64 then the 64 options */
        BUILD_BUG_ON(sizeof(struct scoutfs_volume_options) != (1 + 64) * 8);

        vinf = kzalloc(sizeof(struct volopt_info), GFP_KERNEL);
        if (!vinf) {
                ret = -ENOMEM;
                goto out;
        }

        scoutfs_sysfs_init_attrs(sb, &vinf->ssa);
        vinf->sb = sb;
        sbi->volopt_info = vinf;

        for (i = 0; i < ARRAY_SIZE(volopt_table); i++) {
                volopt_attrs[i] = (struct kobj_attribute) {
                        .attr = { .name = volopt_table[i].name, .mode = S_IWUSR | S_IRUGO },
                        .show = volopt_attr_show,
                        .store = volopt_attr_store,
                };
                volopt_attr_ptrs[i] = &volopt_attrs[i].attr;
        }

        BUILD_BUG_ON(ARRAY_SIZE(volopt_table) != ARRAY_SIZE(volopt_attr_ptrs) - 1);
        volopt_attr_ptrs[i] = NULL;

        ret = scoutfs_sysfs_create_attrs(sb, &vinf->ssa, volopt_attr_ptrs, "volume_options");
        if (ret < 0)
                goto out;

out:
        if (ret)
                scoutfs_volopt_destroy(sb);

        return ret;
}

void scoutfs_volopt_destroy(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct volopt_info *vinf = SCOUTFS_SB(sb)->volopt_info;

        if (vinf) {
                scoutfs_sysfs_destroy_attrs(sb, &vinf->ssa);
                kfree(vinf);
                sbi->volopt_info = NULL;
        }
}
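get_opt_data() can address option nr as &volopt->set_bits + 1 + nr only because the wire struct is a bitmap u64 followed directly by the 64 option slots, which the BUILD_BUG_ON above pins to (1 + 64) * 8 bytes. The implied layout, sketched with an assumed name for the slot array (the real member name isn't shown here):

    struct scoutfs_volume_options {
            __le64 set_bits;     /* bit nr set when option nr carries a value */
            __le64 opts[64];     /* value for option nr, valid when its bit is set */
    };
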
@@ -1,7 +0,0 @@
#ifndef _SCOUTFS_VOLOPT_H_
#define _SCOUTFS_VOLOPT_H_

int scoutfs_volopt_setup(struct super_block *sb);
void scoutfs_volopt_destroy(struct super_block *sb);

#endif
kmod/src/wkic.c (1160 lines changed; diff suppressed because it is too large)
@@ -1,19 +0,0 @@
#ifndef _SCOUTFS_WKIC_H_
#define _SCOUTFS_WKIC_H_

#include "format.h"

typedef int (*wkic_iter_cb_t)(struct scoutfs_key *key, void *val, unsigned int val_len,
                              void *cb_arg);

int scoutfs_wkic_iterate(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *last,
                         struct scoutfs_key *range_start, struct scoutfs_key *range_end,
                         wkic_iter_cb_t cb, void *cb_arg);
int scoutfs_wkic_iterate_stable(struct super_block *sb, struct scoutfs_key *key,
                                struct scoutfs_key *last, struct scoutfs_key *range_start,
                                struct scoutfs_key *range_end, wkic_iter_cb_t cb, void *cb_arg);

int scoutfs_wkic_setup(struct super_block *sb);
void scoutfs_wkic_destroy(struct super_block *sb);

#endif
kmod/src/xattr.c (948 lines changed; diff suppressed because it is too large)
@@ -1,39 +1,25 @@
#ifndef _SCOUTFS_XATTR_H_
#define _SCOUTFS_XATTR_H_

struct scoutfs_xattr_prefix_tags {
        unsigned long hide:1,
                      indx:1,
                      srch:1,
                      totl:1;
};

extern const struct xattr_handler *scoutfs_xattr_handlers[];

int scoutfs_xattr_get_locked(struct inode *inode, const char *name, void *buffer, size_t size,
                             struct scoutfs_lock *lck);
int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_len,
                             const void *value, size_t size, int flags,
                             const struct scoutfs_xattr_prefix_tags *tgs,
                             struct scoutfs_lock *lck, struct scoutfs_lock *totl_lock,
                             struct list_head *ind_locks);

ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
                         size_t size);
int scoutfs_setxattr(struct dentry *dentry, const char *name,
                     const void *value, size_t size, int flags);
int scoutfs_removexattr(struct dentry *dentry, const char *name);
ssize_t scoutfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
                            size_t size, __u32 *hash_pos, __u64 *id_pos,
                            bool e_range, bool show_hidden);

int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
                       struct scoutfs_lock *lock);

struct scoutfs_xattr_prefix_tags {
        unsigned long hide:1,
                      srch:1;
};

int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
                             struct scoutfs_xattr_prefix_tags *tgs);

void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name);
int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len);

void scoutfs_xattr_indx_get_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_xattr_init_indx_key(struct scoutfs_key *key, u8 major, u64 minor, u64 ino, u64 xid);
void scoutfs_xattr_get_indx_key(struct scoutfs_key *key, u8 *major, u64 *minor, u64 *ino, u64 *xid);
void scoutfs_xattr_set_indx_key_xid(struct scoutfs_key *key, u64 xid);

#endif

tests/.gitignore
@@ -1,14 +1,6 @@
|
||||
src/*.d
|
||||
src/createmany
|
||||
src/dumb_renameat2
|
||||
src/dumb_setxattr
|
||||
src/handle_cat
|
||||
src/handle_fsetxattr
|
||||
src/bulk_create_paths
|
||||
src/find_xattrs
|
||||
src/stage_tmpfile
|
||||
src/create_xattr_loop
|
||||
src/o_tmpfile_umask
|
||||
src/o_tmpfile_linkat
|
||||
src/mmap_stress
|
||||
src/mmap_validate
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
v2022.05.01-2-g787cd20
|
||||
@@ -1,21 +1,12 @@
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -I ../kmod/src
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing
SHELL := /usr/bin/bash

# each binary command is built from a single .c file
BIN := src/createmany \
       src/dumb_renameat2 \
       src/dumb_setxattr \
       src/handle_cat \
       src/handle_fsetxattr \
       src/bulk_create_paths \
       src/stage_tmpfile \
       src/find_xattrs \
       src/create_xattr_loop \
       src/fragmented_data_extents \
       src/o_tmpfile_umask \
       src/o_tmpfile_linkat \
       src/mmap_stress \
       src/mmap_validate
       src/find_xattrs

DEPS := $(wildcard src/*.d)

@@ -25,10 +16,8 @@ ifneq ($(DEPS),)
-include $(DEPS)
endif

src/mmap_stress: LIBS+=-lpthread

$(BIN): %: %.c Makefile
        gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@ $(LIBS)
        gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@

.PHONY: clean
clean:
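For context, a quick sketch of how these rules might be exercised; the `make -C tests` invocation assumes this Makefile lives in tests/, and the .d files are the per-source dependency output of the -MD -MP -MF flags.

    # Build the single-file test binaries and inspect the generated
    # dependency files (illustrative invocation, not from the repo docs).
    make -C tests
    ls tests/src/*.d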
@@ -25,9 +25,8 @@ All options can be seen by running with -h.
This script is built to test multi-node systems on one host by using
different mounts of the same devices. The script creates a fake block
device in front of each fs block device for each mount that will be
tested. It will create predictable device mapper devices and mount
them on /mnt/test.N. These static device names and mount paths limit
the script to a single execution per host.
tested. Currently it will create free loop devices and will mount on
/mnt/test.[0-9].

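A minimal sketch of how a predictable device-mapper device can be layered over a backing block device, as the run script does per mount; the device name follows the T_MB/T_DB examples in the table below, but the script's exact commands may differ.

    # Create a linear dm target covering all of /dev/vda under a fixed,
    # predictable name (illustrative, not the script's literal commands).
    size=$(blockdev --getsz /dev/vda)
    dmsetup create _scoutfs_test_meta_0 --table "0 $size linear /dev/vda 0"
    ls /dev/mapper/_scoutfs_test_meta_0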
All tests will be run by default. Particular tests can be included or
excluded by providing test name regular expressions with the -I and -E
@@ -105,19 +104,17 @@ used during the test.

| Variable | Description | Origin | Example |
| ---------------- | ------------------- | --------------- | ----------------- |
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/mapper/\_scoutfs\_test\_meta\_[0-9] |
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/mapper/\_scoutfs\_test\_data\_[0-9] |
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/loop0 |
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/loop1 |
| T\_D[0-9] | per-mount test dir | made for test | /mnt/test.[0-9]/t |
| T\_META\_DEVICE | main FS meta bdev | -M | /dev/vda |
| T\_DATA\_DEVICE | main FS data bdev | -D | /dev/vdb |
| T\_EX\_META\_DEV | scratch meta bdev | -f | /dev/vdd |
| T\_EX\_DATA\_DEV | scratch data bdev | -e | /dev/vdc |
| T\_M[0-9] | mount paths | mounted per run | /mnt/test.[0-9]/ |
| T\_MODULE | built kernel module | created per run | ../kmod/src/..ko |
| T\_NR\_MOUNTS | number of mounts | -n | 3 |
| T\_O[0-9] | mount options | created per run | -o server\_addr= |
| T\_QUORUM | quorum count | -q | 2 |
| T\_EXTRA | per-test file dir | revision ctled | tests/extra/t |
| T\_TMP | per-test tmp prefix | made for test | results/tmp/t/tmp |
| T\_TMPDIR | per-test tmp dir dir | made for test | results/tmp/t |

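As a hedged illustration of how a test consumes these variables (the file operations here are made up; only the variable names come from the table above):

    # Write through mount 0's per-test dir and read the same file back
    # through mount 1 to check cross-mount consistency.
    echo hello > "$T_D0/file"
    cat "$T_D1/file"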
@@ -1,882 +0,0 @@
Ran:
generic/001
generic/002
generic/004
generic/005
generic/006
generic/007
generic/008
generic/009
generic/011
generic/012
generic/013
generic/014
generic/015
generic/016
generic/018
generic/020
generic/021
generic/022
generic/023
generic/024
generic/025
generic/026
generic/028
generic/029
generic/030
generic/031
generic/032
generic/033
generic/034
generic/035
generic/037
generic/039
generic/040
generic/041
generic/050
generic/052
generic/053
generic/056
generic/057
generic/058
generic/059
generic/060
generic/061
generic/062
generic/063
generic/064
generic/065
generic/066
generic/067
generic/069
generic/070
generic/071
generic/073
generic/076
generic/078
generic/079
generic/080
generic/081
generic/082
generic/084
generic/086
generic/087
generic/088
generic/090
generic/091
generic/092
generic/094
generic/096
generic/097
generic/098
generic/099
generic/101
generic/104
generic/105
generic/106
generic/107
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/117
generic/118
generic/119
generic/120
generic/121
generic/122
generic/123
generic/124
generic/126
generic/128
generic/129
generic/130
generic/131
generic/134
generic/135
generic/136
generic/138
generic/139
generic/140
generic/141
generic/142
generic/143
generic/144
generic/145
generic/146
generic/147
generic/148
generic/149
generic/150
generic/151
generic/152
generic/153
generic/154
generic/155
generic/156
generic/157
generic/158
generic/159
generic/160
generic/161
generic/162
generic/163
generic/169
generic/171
generic/172
generic/173
generic/174
generic/177
generic/178
generic/179
generic/180
generic/181
generic/182
generic/183
generic/184
generic/185
generic/188
generic/189
generic/190
generic/191
generic/193
generic/194
generic/195
generic/196
generic/197
generic/198
generic/199
generic/200
generic/201
generic/202
generic/203
generic/205
generic/206
generic/207
generic/210
generic/211
generic/212
generic/214
generic/215
generic/216
generic/217
generic/218
generic/219
generic/220
generic/221
generic/222
generic/223
generic/225
generic/227
generic/228
generic/229
generic/230
generic/235
generic/236
generic/237
generic/238
generic/240
generic/244
generic/245
generic/246
generic/247
generic/248
generic/249
generic/250
generic/252
generic/253
generic/254
generic/255
generic/256
generic/257
generic/258
generic/259
generic/260
generic/261
generic/262
generic/263
generic/264
generic/265
generic/266
generic/267
generic/268
generic/271
generic/272
generic/276
generic/277
generic/278
generic/279
generic/281
generic/282
generic/283
generic/284
generic/286
generic/287
generic/288
generic/289
generic/290
generic/291
generic/292
generic/293
generic/294
generic/295
generic/296
generic/301
generic/302
generic/303
generic/304
generic/305
generic/306
generic/307
generic/308
generic/309
generic/312
generic/313
generic/314
generic/315
generic/316
generic/317
generic/319
generic/322
generic/324
generic/325
generic/326
generic/327
generic/328
generic/329
generic/330
generic/331
generic/332
generic/335
generic/336
generic/337
generic/341
generic/342
generic/343
generic/346
generic/348
generic/353
generic/355
generic/358
generic/359
generic/360
generic/361
generic/362
generic/363
generic/364
generic/365
generic/366
generic/367
generic/368
generic/369
generic/370
generic/371
generic/372
generic/373
generic/374
generic/375
generic/376
generic/377
generic/378
generic/379
generic/380
generic/381
generic/382
generic/383
generic/384
generic/385
generic/386
generic/389
generic/391
generic/392
generic/393
generic/394
generic/395
generic/396
generic/397
generic/398
generic/400
generic/401
generic/402
generic/403
generic/404
generic/406
generic/407
generic/408
generic/412
generic/413
generic/414
generic/417
generic/419
generic/420
generic/421
generic/422
generic/424
generic/425
generic/426
generic/427
generic/428
generic/436
generic/437
generic/439
generic/440
generic/443
generic/445
generic/446
generic/448
generic/449
generic/450
generic/451
generic/452
generic/453
generic/454
generic/456
generic/458
generic/460
generic/462
generic/463
generic/465
generic/466
generic/468
generic/469
generic/470
generic/471
generic/474
generic/477
generic/478
generic/479
generic/480
generic/481
generic/483
generic/485
generic/486
generic/487
generic/488
generic/489
generic/490
generic/491
generic/492
generic/498
generic/499
generic/501
generic/502
generic/503
generic/504
generic/505
generic/506
generic/507
generic/508
generic/509
generic/510
generic/511
generic/512
generic/513
generic/514
generic/515
generic/516
generic/517
generic/518
generic/519
generic/520
generic/523
generic/524
generic/525
generic/526
generic/527
generic/528
generic/529
generic/530
generic/531
generic/533
generic/534
generic/535
generic/536
generic/537
generic/538
generic/539
generic/540
generic/541
generic/542
generic/543
generic/544
generic/545
generic/546
generic/547
generic/548
generic/549
generic/550
generic/552
generic/553
generic/555
generic/556
generic/557
generic/566
generic/567
generic/571
generic/572
generic/573
generic/574
generic/575
generic/576
generic/577
generic/578
generic/580
generic/581
generic/582
generic/583
generic/584
generic/586
generic/587
generic/588
generic/591
generic/592
generic/593
generic/594
generic/595
generic/596
generic/597
generic/598
generic/599
generic/600
generic/601
generic/602
generic/603
generic/604
generic/605
generic/606
generic/607
generic/608
generic/609
generic/610
generic/611
generic/612
generic/613
generic/614
generic/618
generic/621
generic/623
generic/624
generic/625
generic/626
generic/628
generic/629
generic/630
generic/632
generic/634
generic/635
generic/637
generic/638
generic/639
generic/640
generic/644
generic/645
generic/646
generic/647
generic/651
generic/652
generic/653
generic/654
generic/655
generic/657
generic/658
generic/659
generic/660
generic/661
generic/662
generic/663
generic/664
generic/665
generic/666
generic/667
generic/668
generic/669
generic/673
generic/674
generic/675
generic/676
generic/677
generic/678
generic/679
generic/680
generic/681
generic/682
generic/683
generic/684
generic/685
generic/686
generic/687
generic/688
generic/689
shared/002
shared/032
Not run:
generic/008
generic/009
generic/012
generic/015
generic/016
generic/018
generic/021
generic/022
generic/025
generic/026
generic/031
generic/033
generic/050
generic/052
generic/058
generic/059
generic/060
generic/061
generic/063
generic/064
generic/078
generic/079
generic/081
generic/082
generic/091
generic/094
generic/096
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/118
generic/119
generic/121
generic/122
generic/123
generic/128
generic/130
generic/134
generic/135
generic/136
generic/138
generic/139
generic/140
generic/142
generic/143
generic/144
generic/145
generic/146
generic/147
generic/148
generic/149
generic/150
generic/151
generic/152
generic/153
generic/154
generic/155
generic/156
generic/157
generic/158
generic/159
generic/160
generic/161
generic/162
generic/163
generic/171
generic/172
generic/173
generic/174
generic/177
generic/178
generic/179
generic/180
generic/181
generic/182
generic/183
generic/185
generic/188
generic/189
generic/190
generic/191
generic/193
generic/194
generic/195
generic/196
generic/197
generic/198
generic/199
generic/200
generic/201
generic/202
generic/203
generic/205
generic/206
generic/207
generic/210
generic/211
generic/212
generic/214
generic/216
generic/217
generic/218
generic/219
generic/220
generic/222
generic/223
generic/225
generic/227
generic/229
generic/230
generic/235
generic/238
generic/240
generic/244
generic/250
generic/252
generic/253
generic/254
generic/255
generic/256
generic/259
generic/260
generic/261
generic/262
generic/263
generic/264
generic/265
generic/266
generic/267
generic/268
generic/271
generic/272
generic/276
generic/277
generic/278
generic/279
generic/281
generic/282
generic/283
generic/284
generic/287
generic/288
generic/289
generic/290
generic/291
generic/292
generic/293
generic/295
generic/296
generic/301
generic/302
generic/303
generic/304
generic/305
generic/312
generic/314
generic/316
generic/317
generic/324
generic/326
generic/327
generic/328
generic/329
generic/330
generic/331
generic/332
generic/353
generic/355
generic/358
generic/359
generic/361
generic/362
generic/363
generic/364
generic/365
generic/366
generic/367
generic/368
generic/369
generic/370
generic/371
generic/372
generic/373
generic/374
generic/378
generic/379
generic/380
generic/381
generic/382
generic/383
generic/384
generic/385
generic/386
generic/391
generic/392
generic/395
generic/396
generic/397
generic/398
generic/400
generic/402
generic/404
generic/406
generic/407
generic/408
generic/412
generic/413
generic/414
generic/417
generic/419
generic/420
generic/421
generic/422
generic/424
generic/425
generic/427
generic/439
generic/440
generic/446
generic/449
generic/450
generic/451
generic/453
generic/454
generic/456
generic/458
generic/462
generic/463
generic/465
generic/466
generic/468
generic/469
generic/470
generic/471
generic/474
generic/485
generic/487
generic/488
generic/491
generic/492
generic/499
generic/501
generic/503
generic/505
generic/506
generic/507
generic/508
generic/511
generic/513
generic/514
generic/515
generic/516
generic/517
generic/518
generic/519
generic/520
generic/528
generic/530
generic/536
generic/537
generic/538
generic/539
generic/540
generic/541
generic/542
generic/543
generic/544
generic/545
generic/546
generic/548
generic/549
generic/550
generic/552
generic/553
generic/555
generic/556
generic/566
generic/567
generic/572
generic/573
generic/574
generic/575
generic/576
generic/577
generic/578
generic/580
generic/581
generic/582
generic/583
generic/584
generic/586
generic/587
generic/588
generic/591
generic/592
generic/593
generic/594
generic/595
generic/596
generic/597
generic/598
generic/599
generic/600
generic/601
generic/602
generic/603
generic/605
generic/606
generic/607
generic/608
generic/609
generic/610
generic/612
generic/613
generic/621
generic/623
generic/624
generic/625
generic/626
generic/628
generic/629
generic/630
generic/635
generic/644
generic/645
generic/646
generic/647
generic/651
generic/652
generic/653
generic/654
generic/655
generic/657
generic/658
generic/659
generic/660
generic/661
generic/662
generic/663
generic/664
generic/665
generic/666
generic/667
generic/668
generic/669
generic/673
generic/674
generic/675
generic/677
generic/678
generic/679
generic/680
generic/681
generic/682
generic/683
generic/684
generic/685
generic/686
generic/687
generic/688
generic/689
shared/002
shared/032
Passed all 512 tests
@@ -1,44 +0,0 @@
generic/003 # missing atime update in buffered read
generic/075 # file content mismatch failures (fds, etc)
generic/103 # enospc causes trans commit failures
generic/108 # mount fails on failing device?
generic/112 # file content mismatch failures (fds, etc)
generic/213 # enospc causes trans commit failures
generic/318 # can't support user namespaces until v5.11
generic/321 # requires selinux enabled for '+' in ls?
generic/338 # BUG_ON update inode error handling
generic/347 # _dmthin_mount doesn't work?
generic/356 # swap
generic/357 # swap
generic/409 # bind mounts not scripted yet
generic/410 # bind mounts not scripted yet
generic/411 # bind mounts not scripted yet
generic/423 # symlink inode size is strlen() + 1 on scoutfs
generic/430 # xfs_io copy_range missing in el7
generic/431 # xfs_io copy_range missing in el7
generic/432 # xfs_io copy_range missing in el7
generic/433 # xfs_io copy_range missing in el7
generic/434 # xfs_io copy_range missing in el7
generic/441 # dm-mapper
generic/444 # el9's posix_acl_update_mode is buggy ?
generic/467 # open_by_handle ESTALE
generic/472 # swap
generic/484 # dm-mapper
generic/493 # swap
generic/494 # swap
generic/495 # swap
generic/496 # swap
generic/497 # swap
generic/532 # xfs_io statx attrib_mask missing in el7
generic/554 # swap
generic/563 # cgroup+loopdev
generic/564 # xfs_io copy_range missing in el7
generic/565 # xfs_io copy_range missing in el7
generic/568 # falloc not resulting in block count increase
generic/569 # swap
generic/570 # swap
generic/620 # dm-hugedisk
generic/633 # id-mapped mounts missing in el7
generic/636 # swap
generic/641 # swap
generic/643 # swap
@@ -1,40 +0,0 @@
#!/usr/bin/bash

#
# This fencing script is used for testing clusters of multiple mounts on
# a single host. It finds mounts to fence by looking for their rids and
# only knows how to "fence" by using forced unmount.
#

echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"

echo_fail() {
        echo "$@" >&2
        exit 1
}

# silence error messages
quiet_cat()
{
        cat "$@" 2>/dev/null
}

rid="$SCOUTFS_FENCED_REQ_RID"

shopt -s nullglob
for fs in /sys/fs/scoutfs/*; do
        fs_rid="$(quiet_cat $fs/rid)"
        nr="$(quiet_cat $fs/data_device_maj_min)"
        [ ! -d "$fs" -o "$fs_rid" != "$rid" ] && continue

        mnt=$(findmnt -l -n -t scoutfs -o TARGET -S $nr)
        [ -z "$mnt" ] && continue

        if ! umount -qf "$mnt"; then
                if [ -d "$fs" ]; then
                        echo_fail "umount -qf $mnt failed"
                fi
        fi
done

exit 0
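A hedged example of exercising the script by hand; the environment variable names come from the script itself, while the rid value and the script path are made up for illustration.

    # Invoke the fencing script the way the fenced daemon would,
    # with the request parameters passed in the environment.
    SCOUTFS_FENCED_REQ_RID=f0a1b2c3d4e5f607 \
    SCOUTFS_FENCED_REQ_IP=127.0.0.1 \
        bash tests/fence/force-umount.sh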
@@ -7,9 +7,8 @@ t_status_msg()
export T_PASS_STATUS=100
export T_SKIP_STATUS=101
export T_FAIL_STATUS=102
export T_SKIP_PERMITTED_STATUS=103
export T_FIRST_STATUS="$T_PASS_STATUS"
export T_LAST_STATUS="$T_SKIP_PERMITTED_STATUS"
export T_LAST_STATUS="$T_FAIL_STATUS"

t_pass()
{
@@ -22,17 +21,6 @@ t_skip()
        exit $T_SKIP_STATUS
}

#
# This exit code is *reserved* for tests that are up-front never going to work
# in certain cases. This should be expressly documented per-case and made
# abundantly clear before merging. The test itself should document its case.
#
t_skip_permitted()
{
        t_status_msg "$@"
        exit $T_SKIP_PERMITTED_STATUS
}

t_fail()
{
        t_status_msg "$@"
@@ -47,54 +35,24 @@ t_fail()
t_quiet()
{
        echo "# $*" >> "$T_TMPDIR/quiet.log"
        "$@" >> "$T_TMPDIR/quiet.log" 2>&1 || \
        "$@" > "$T_TMPDIR/quiet.log" 2>&1 || \
                t_fail "quiet command failed"
}

#
# Quietly run a command during a test. The output is logged but only
# the return code is printed, presumably because the output contains
# a lot of invocation specific text that is difficult to filter.
# redirect test output back to the output of the invoking script instead
# of the compared output.
#
t_rc()
{
        echo "# $*" >> "$T_TMP.rc.log"
        "$@" >> "$T_TMP.rc.log" 2>&1
        echo "rc: $?"
}
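An illustrative use of the two helpers (the commands themselves are made up; the scoutfs statfs invocation mirrors one that appears later in these functions):

    # t_quiet hides setup noise entirely; t_rc logs the command's
    # output and emits only "rc: N" into the compared output.
    t_quiet mkdir -p "$T_D0/dir"
    t_rc scoutfs statfs -s rid -p "$T_M0"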
#
# As run, stdout/err are redirected to a file that will be compared with
# the stored expected golden output of the test. This redirects
# stdout/err in the script to stdout of the invoking run-test. It's
# intended to give visible output of tests without being included in the
# golden output.
#
# (see the goofy "exec" fd manipulation in the main run-tests as it runs
# each test)
#
t_stdout_invoked()
t_restore_output()
{
        exec >&6 2>&1
}

#
# This undoes t_stdout_invoked, returning the test's stdout/err to the
# output file as it was when it was launched.
# redirect a command's output back to the compared output after the
# test has restored its output
#
t_stdout_compare()
t_compare_output()
{
        exec >&7 2>&1
}
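A minimal sketch, not run-tests' exact code, of the fd wiring these two functions assume: fd 6 saves the harness's own stdout and fd 7 points at the test's compared output file.

    # Hypothetical harness-side setup before running a test script;
    # $test_name is an invented variable standing in for the test.
    exec 6>&1 7>"$T_RESULTS/output/$test_name"
    exec >&7 2>&1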

#
# usually bash prints an annoying output message when jobs
# are killed. We can avoid that by redirecting stderr for
# the bash process when it reaps the jobs that are killed.
#
t_silent_kill() {
        exec {ERR}>&2 2>/dev/null
        kill "$@"
        wait "$@"
        exec 2>&$ERR {ERR}>&-
        "$@" >&7 2>&1
}
@@ -6,61 +6,6 @@ t_filter_fs()
            -e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
}

#
# We can hit a spurious kasan warning that was fixed upstream:
#
# e504e74cc3a2 x86/unwind/orc: Disable KASAN checking in the ORC unwinder, part 2
#
# KASAN can get mad when the unwinder doesn't find ORC metadata and
# wanders up without using frames and hits the KASAN stack red zones.
# We can ignore these messages.
#
# They're bracketed by:
# [ 2687.690127] ==================================================================
# [ 2687.691366] BUG: KASAN: stack-out-of-bounds in get_reg+0x1bc/0x230
# ...
# [ 2687.706220] ==================================================================
# [ 2687.707284] Disabling lock debugging due to kernel taint
#
# That final lock debugging message may not be included.
#
ignore_harmless_unwind_kasan_stack_oob()
{
        awk '
        BEGIN {
                in_soob = 0
                soob_nr = 0
        }
        ( !in_soob && $0 ~ /==================================================================/ ) {
                in_soob = 1
                soob_nr = NR
                saved = $0
        }
        ( in_soob == 1 && NR == (soob_nr + 1) ) {
                if (match($0, /KASAN: stack-out-of-bounds in get_reg/) != 0) {
                        in_soob = 2
                } else {
                        in_soob = 0
                        print saved
                }
                saved=""
        }
        ( in_soob == 2 && $0 ~ /==================================================================/ ) {
                in_soob = 3
                soob_nr = NR
        }
        ( in_soob == 3 && NR > soob_nr && $0 !~ /Disabling lock debugging/ ) {
                in_soob = 0
        }
        ( !in_soob ) { print $0 }
        END {
                if (saved) {
                        print saved
                }
        }
        '
}

#
# Filter out expected messages. Putting messages here implies that
# tests aren't relying on messages to discover failures.. they're
@@ -73,7 +18,6 @@ t_filter_dmesg()

        # the kernel can just be noisy
        re=" used greatest stack depth: "
        re="$re|sched: RT throttling activated"

        # mkfs/mount checks partition tables
        re="$re|unknown partition table"
@@ -96,7 +40,7 @@ t_filter_dmesg()
        # mount and unmount spew a bunch
        re="$re|scoutfs.*client connected"
        re="$re|scoutfs.*client disconnected"
        re="$re|scoutfs.*server starting"
        re="$re|scoutfs.*server setting up"
        re="$re|scoutfs.*server ready"
        re="$re|scoutfs.*server accepted"
        re="$re|scoutfs.*server closing"
@@ -108,71 +52,15 @@

        # tests that drop unmount io triggers fencing
        re="$re|scoutfs .* error: fencing "
        re="$re|scoutfs .*: waiting for .* clients"
        re="$re|scoutfs .*: all clients recovered"
        re="$re|scoutfs .*: waiting for .* lock clients"
        re="$re|scoutfs .*: all lock clients recovered"
        re="$re|scoutfs .* error: client rid.*lock recovery timed out"

        # we test bad devices and options
        # some tests mount w/o options
        re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
        re="$re|scoutfs .* error: meta_super META flag not set"
        re="$re|scoutfs .* error: could not open metadev:.*"
        re="$re|scoutfs .* error: Unknown or malformed option,.*"
        re="$re|scoutfs .* error: invalid quorum_heartbeat_timeout_ms value"

        # in debugging kernels we can slow things down a bit
        re="$re|hrtimer: interrupt took .*"
        re="$re|clocksource: Long readout interval"

        # fencing tests force unmounts and trigger timeouts
        re="$re|scoutfs .* forcing unmount"
        re="$re|scoutfs .* reconnect timed out"
        re="$re|scoutfs .* recovery timeout expired"
        re="$re|scoutfs .* fencing previous leader"
        re="$re|scoutfs .* reclaimed resources"
        re="$re|scoutfs .* quorum .* error"
        re="$re|scoutfs .* error reading quorum block"
        re="$re|scoutfs .* error .* writing quorum block"
        re="$re|scoutfs .* error .* while checking to delete inode"
        re="$re|scoutfs .* error .*writing btree blocks.*"
        re="$re|scoutfs .* error .*writing super block.*"
        re="$re|scoutfs .* error .* freeing merged btree blocks.*.looping commit del.*upd freeing item"
        re="$re|scoutfs .* error .* freeing merged btree blocks.*.final commit del.upd freeing item"
        re="$re|scoutfs .* error .*reading quorum block.*to update event.*"
        re="$re|scoutfs .* error.*server failed to bind to.*"
        re="$re|scoutfs .* critical transaction commit failure.*"

        # ENOLINK (-67) indicates an expected forced unmount error
        re="$re|scoutfs .* error -67 .*"

        # change-devices causes loop device resizing
        re="$re|loop: module loaded"
        re="$re|loop[0-9].* detected capacity change from.*"
        re="$re|dm-[0-9].* detected capacity change from.*"

        # ignore systemd-journal rotating
        re="$re|systemd-journald.*"

        # process accounting can be noisy
        re="$re|Process accounting resumed.*"

        # format vers back/compat tries bad mounts
        re="$re|scoutfs .* error.*outside of supported version.*"
        re="$re|scoutfs .* error.*could not get .*super.*"

        # ignore "unsafe core pattern" when xfstests tries to disable cores"
        re="$re|Unsafe core_pattern used with fs.suid_dumpable=2.*"
        re="$re|Pipe handler or fully qualified core dump path required.*"
        re="$re|Set kernel.core_pattern before fs.suid_dumpable.*"

        # perf warning that it adjusted sample rate
        re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"

        # some ci test guests are unresponsive
        re="$re|longest quorum heartbeat .* delay"

        # creating block devices may trigger this
        re="$re|block device autoloading is deprecated and will be removed."

        egrep -v "($re)" | \
                ignore_harmless_unwind_kasan_stack_oob
        egrep -v "($re)"
}

@@ -17,24 +17,14 @@ t_sync_seq_index()
        t_quiet sync
}

t_mount_rid()
#
# Output the "f.$fsid.r.$rid" identifier string for the given mount
# number, 0 is used by default if none is specified.
#
t_ident()
{
        local nr="${1:-0}"
        local mnt="$(eval echo \$T_M$nr)"
        local rid

        rid=$(scoutfs statfs -s rid -p "$mnt")

        echo "$rid"
}

#
# Output the "f.$fsid.r.$rid" identifier string for the given path
# in a mounted scoutfs volume.
#
t_ident_from_mnt()
{
        local mnt="$1"
        local fsid
        local rid

@@ -44,38 +34,6 @@ t_ident_from_mnt()
        echo "f.${fsid:0:6}.r.${rid:0:6}"
}

#
# Output the "f.$fsid.r.$rid" identifier string for the given mount
# number, 0 is used by default if none is specified.
#
t_ident()
{
        local nr="${1:-0}"
        local mnt="$(eval echo \$T_M$nr)"

        t_ident_from_mnt "$mnt"
}

#
# Output the sysfs path for a path in a mounted fs.
#
t_sysfs_path_from_ident()
{
        local ident="$1"

        echo "/sys/fs/scoutfs/$ident"
}

#
# Output the sysfs path for a path in a mounted fs.
#
t_sysfs_path_from_mnt()
{
        local mnt="$1"

        t_sysfs_path_from_ident $(t_ident_from_mnt $mnt)
}

#
# Output the mount's sysfs path, defaulting to mount 0 if none is
# specified.
@@ -84,7 +42,7 @@ t_sysfs_path()
{
        local nr="$1"

        t_sysfs_path_from_ident $(t_ident $nr)
        echo "/sys/fs/scoutfs/$(t_ident $nr)"
}

#
@@ -106,29 +64,6 @@ t_fs_nrs()
        seq 0 $((T_NR_MOUNTS - 1))
}

#
# output the fs nrs of quorum nodes, we "know" that
# the quorum nrs are the first consecutive nrs
#
t_quorum_nrs()
{
        seq 0 $((T_QUORUM - 1))
}

#
# outputs "1" if the fs number has "1" in its quorum/is_leader file.
# All other cases output 0, including the fs nr being a client which
# won't have a quorum/ dir.
#
t_fs_is_leader()
{
        if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader 2>/dev/null)" == "1" ]; then
                echo "1"
        else
                echo "0"
        fi
}

#
# Output the mount nr of the current server. This takes no steps to
# ensure that the server doesn't shut down and have some other mount
@@ -137,7 +72,7 @@ t_fs_is_leader()
t_server_nr()
{
        for i in $(t_fs_nrs); do
                if [ "$(t_fs_is_leader $i)" == "1" ]; then
                if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "1" ]; then
                        echo $i
                        return
                fi
@@ -155,7 +90,7 @@ t_server_nr()
t_first_client_nr()
{
        for i in $(t_fs_nrs); do
                if [ "$(t_fs_is_leader $i)" == "0" ]; then
                if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "0" ]; then
                        echo $i
                        return
                fi
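Illustrative use of the helpers above to pick the elected server mount and one client mount for a test that needs both roles (t_wait_for_leader is defined further down in this file):

    t_wait_for_leader
    sv=$(t_server_nr)
    cl=$(t_first_client_nr)
    echo "server is mount $sv, first client is mount $cl"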
@@ -184,27 +119,7 @@ t_mount()
        test "$nr" -lt "$T_NR_MOUNTS" || \
                t_fail "fs nr $nr invalid"

        eval t_quiet mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
}

#
# Mount with an optional mount option string. If the string is empty
# then the saved mount options are used. If the string has contents
# then it is appended to the end of the saved options with a separating
# comma.
#
# Unlike t_mount this won't inherently fail in t_quiet, errors are
# returned so bad options can be tested.
#
t_mount_opt()
{
        local nr="$1"
        local opt="${2:+,$2}"

        test "$nr" -lt "$T_NR_MOUNTS" || \
                t_fail "fs nr $nr invalid"

        eval mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
        eval t_quiet mount -t scoutfs \$T_O$nr \$T_DB$nr \$T_M$nr
}
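Illustrative use of the error-returning behavior; the option string here is made up:

    # A bad option should fail the mount without aborting the test.
    if t_mount_opt 0 "not_a_real_option"; then
            echo "unexpectedly mounted"
    fi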
t_umount()
@@ -214,17 +129,7 @@ t_umount()
        test "$nr" -lt "$T_NR_MOUNTS" || \
                t_fail "fs nr $nr invalid"

        eval t_quiet umount \$T_M$nr
}

t_force_umount()
{
        local nr="$1"

        test "$nr" -lt "$T_NR_MOUNTS" || \
                t_fail "fs nr $nr invalid"

        eval t_quiet umount -f \$T_M$nr
        eval t_quiet umount \$T_M$i
}

#
@@ -296,15 +201,6 @@ t_trigger_get() {
        cat "$(t_trigger_path "$nr")/$which"
}

t_trigger_set() {
        local which="$1"
        local nr="$2"
        local val="$3"
        local path=$(t_trigger_path "$nr")

        echo "$val" > "$path/$which"
}

t_trigger_show() {
        local which="$1"
        local string="$2"
@@ -316,8 +212,9 @@ t_trigger_show() {
t_trigger_arm_silent() {
        local which="$1"
        local nr="$2"
        local path=$(t_trigger_path "$nr")

        t_trigger_set "$which" "$nr" 1
        echo 1 > "$path/$which"
}

t_trigger_arm() {
@@ -380,239 +277,3 @@ t_counter_diff_changed() {
                echo "counter $which didn't change" ||
                echo "counter $which changed"
}

#
# See if we can find a local mount with the caller's rid.
#
t_rid_is_mounted() {
        local rid="$1"
        local fr="$1"

        for fr in /sys/fs/scoutfs/*; do
                if [ "$(cat $fr/rid)" == "$rid" ]; then
                        return 0
                fi
        done

        return 1
}

#
# A given mount is being fenced if any mount has a fence request pending
# for it which hasn't finished and been removed.
#
t_rid_is_fencing() {
        local rid="$1"
        local fr

        for fr in /sys/fs/scoutfs/*; do
                if [ -d "$fr/fence/$rid" ]; then
                        return 0
                fi
        done

        return 1
}

#
# Wait until the mount identified by the first rid arg is not in any
# states specified by the remaining state description word args.
#
t_wait_if_rid_is() {
        local rid="$1"

        while ( [[ $* =~ mounted ]] && t_rid_is_mounted $rid ) ||
              ( [[ $* =~ fencing ]] && t_rid_is_fencing $rid ) ; do
                sleep .5
        done
}

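For example, a fencing test might block until a victim mount is fully gone:

    # Wait until the fenced rid is neither mounted nor mid-fence.
    t_wait_if_rid_is "$rid" mounted fencing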
#
# Wait until any mount identifies itself as the elected leader. We can
# be waiting while tests mount and unmount so mounts may not be mounted
# at the test's expected mount points.
#
t_wait_for_leader() {
        local i

        while sleep .25; do
                for i in $(t_fs_nrs); do
                        local ldr="$(t_sysfs_path $i 2>/dev/null)/quorum/is_leader"
                        if [ "$(cat $ldr 2>/dev/null)" == "1" ]; then
                                return
                        fi
                done
        done
}

t_get_sysfs_mount_option() {
        local nr="$1"
        local name="$2"
        local opt="$(t_sysfs_path $nr)/mount_options/$name"

        cat "$opt"
}

t_set_sysfs_mount_option() {
        local nr="$1"
        local name="$2"
        local val="$3"
        local opt="$(t_sysfs_path $nr)/mount_options/$name"

        echo "$val" > "$opt" 2>/dev/null
}

t_set_all_sysfs_mount_options() {
        local name="$1"
        local val="$2"
        local i

        for i in $(t_fs_nrs); do
                t_set_sysfs_mount_option $i $name $val
        done
}

declare -A _saved_opts
t_save_all_sysfs_mount_options() {
        local name="$1"
        local ind
        local opt
        local i

        for i in $(t_fs_nrs); do
                opt="$(t_sysfs_path $i)/mount_options/$name"
                ind="${name}_${i}"

                _saved_opts[$ind]="$(cat $opt)"
        done
}

t_restore_all_sysfs_mount_options() {
        local name="$1"
        local ind
        local i

        for i in $(t_fs_nrs); do
                ind="${name}_${i}"

                t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
        done
}

t_force_log_merge() {
        local sv=$(t_server_nr)
        local merges_started
        local last_merges_started
        local merges_completed
        local last_merges_completed

        while true; do
                last_merges_started=$(t_counter log_merge_start $sv)
                last_merges_completed=$(t_counter log_merge_complete $sv)

                t_trigger_arm_silent log_merge_force_finalize_ours $sv

                t_sync_seq_index

                while test "$(t_trigger_get log_merge_force_finalize_ours $sv)" == "1"; do
                        sleep .5
                done

                merges_started=$(t_counter log_merge_start $sv)

                if (( merges_started > last_merges_started )); then
                        merges_completed=$(t_counter log_merge_complete $sv)

                        while (( merges_completed == last_merges_completed )); do
                                sleep .5
                                merges_completed=$(t_counter log_merge_complete $sv)
                        done
                        break
                fi
        done
}

declare -A _last_scan
t_get_orphan_scan_runs() {
        local i

        for i in $(t_fs_nrs); do
                _last_scan[$i]=$(t_counter orphan_scan $i)
        done
}

t_wait_for_orphan_scan_runs() {
        local i
        local scan

        t_get_orphan_scan_runs

        for i in $(t_fs_nrs); do
                while true; do
                        scan=$(t_counter orphan_scan $i)
                        if (( scan != _last_scan[$i] )); then
                                break
                        fi
                        sleep .5
                done
        done
}

declare -A _last_empty
t_get_orphan_scan_empty() {
        local i

        for i in $(t_fs_nrs); do
                _last_empty[$i]=$(t_counter orphan_scan_empty $i)
        done
}

t_wait_for_no_orphans() {
        local i;
        local working;
        local empty;

        t_get_orphan_scan_empty

        while true; do
                working=0

                t_wait_for_orphan_scan_runs

                for i in $(t_fs_nrs); do
                        empty=$(t_counter orphan_scan_empty $i)
                        if (( empty == _last_empty[$i] )); then
                                (( working++ ))
                        else
                                (( _last_empty[$i] = empty ))
                        fi
                done

                if (( working == 0 )); then
                        break
                fi

                sleep 1
        done
}

#
# Repeatedly run the arguments as a command, sleeping in between, until
# it returns success. The first argument is a relative timeout in
# seconds. The remaining arguments are the command and its arguments.
#
# If the timeout expires without the command returning 0 then the test
# fails.
#
t_wait_until_timeout() {
        local relative="$1"
        local expire="$((SECONDS + relative))"
        shift

        while (( SECONDS < expire )); do
                "$@" && return
                sleep 1
        done

        t_fail "command failed for $relative sec: $@"
}
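Illustrative usage; the file being polled for is made up:

    # Fail the test if the file doesn't appear within 30 seconds.
    t_wait_until_timeout 30 test -e "$T_D0/expected-file"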
@@ -1,101 +0,0 @@

#
# Generate TAP format test results
#

t_tap_header()
{
        local runid=$1
        local sequence=( $(echo $tests) )
        local count=${#sequence[@]}

        # avoid recreating the same TAP result over again - harness sets this
        [[ -z "$runid" ]] && runid="*test*"

        cat > $T_RESULTS/scoutfs.tap <<TAPEOF
TAP version 14
1..${count}
#
# TAP results for run ${runid}
#
# host/run info:
#
# hostname: ${HOSTNAME}
# test start time: $(date --utc)
# uname -r: $(uname -r)
# scoutfs commit id: $(git describe --tags)
#
# sequence for this run:
#
TAPEOF

        # Sequence
        for t in ${tests}; do
                echo ${t/.sh/}
        done | cat -n | expand | column -c 120 | expand | sed 's/^ /#/' >> $T_RESULTS/scoutfs.tap
        echo "#" >> $T_RESULTS/scoutfs.tap
}

t_tap_progress()
{
        (
        local i=$(( testcount + 1 ))
        local testname=$1
        local result=$2

        local stmsg=""
        local diff=""
        local dmsg=""

        if [[ -s $T_RESULTS/tmp/${testname}/status.msg ]]; then
                stmsg="1"
        fi

        if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
                dmsg="1"
        fi

        if ! cmp -s golden/${testname} $T_RESULTS/output/${testname}; then
                diff="1"
        fi

        if [[ "${result}" == "100" ]] && [[ -z "${dmsg}" ]] && [[ -z "${diff}" ]]; then
                echo "ok ${i} - ${testname}"
        elif [[ "${result}" == "103" ]]; then
                echo "ok ${i} - ${testname}"
                echo "# ${testname} ** skipped - permitted **"
        else
                echo "not ok ${i} - ${testname}"

                case ${result} in
                101)
                        echo "# ${testname} ** skipped **"
                        ;;
                102)
                        echo "# ${testname} ** failed **"
                        ;;
                esac

                if [[ -n "${stmsg}" ]]; then
                        echo "#"
                        echo "# status:"
                        echo "#"
                        cat $T_RESULTS/tmp/${testname}/status.msg | sed 's/^/# - /'
                fi

                if [[ -n "${diff}" ]]; then
                        echo "#"
                        echo "# diff:"
                        echo "#"
                        diff -u golden/${testname} $T_RESULTS/output/${testname} | expand | sed 's/^/# /'
                fi

                if [[ -n "${dmsg}" ]]; then
                        echo "#"
                        echo "# dmesg:"
                        echo "#"
                        cat "$T_RESULTS/tmp/${testname}/dmesg.new" | sed 's/^/# /'
                fi
        fi
        ) >> $T_RESULTS/scoutfs.tap
}
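For reference, the scoutfs.tap stream produced by these functions looks roughly like this for a two-test run (the names and results are invented):

    TAP version 14
    1..2
    ok 1 - basic-posix-consistency
    not ok 2 - mounting
    # mounting ** failed **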
@@ -1,6 +0,0 @@
== prepare devices, mount point, and logs
== bad devices, bad options
== swapped devices
== both meta devices
== both data devices
== good volume, bad option and good options
@@ -1,155 +0,0 @@
== setup test directory
== getfacl
directory drwxr-xr-x 0 0 0 '.'
# file: .
# owner: root
# group: root
user::rwx
group::r-x
other::r-x

== basic non-acl access through permissions
directory drwxr-xr-x 0 44444 0 'dir-testuid'
touch: cannot touch 'dir-testuid/file-group-write': Permission denied
touch: cannot touch 'symlinkdir-testuid/symlink-file-group-write': Permission denied
regular empty file -rw-r--r-- 22222 44444 0 'dir-testuid/file-group-write'
regular empty file -rw-r--r-- 22222 44444 0 'symlinkdir-testuid/symlink-file-group-write'
== basic acl access
directory drwxr-xr-x 0 0 0 'dir-root'
touch: cannot touch 'dir-root/file-group-write': Permission denied
touch: cannot touch 'symlinkdir-root/file-group-write': Permission denied
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x

regular empty file -rw-r--r-- 22222 0 0 'dir-root/file-group-write'
regular empty file -rw-r--r-- 22222 0 0 'symlinkdir-root/file-group-write'
== directory exec
Success
Success
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rw-
group::r-x
mask::rwx
other::r-x

Failed
Failed
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rw-
group::r-x
group:44444:rwx
mask::rwx
other::r-x

Success
Success
== get/set attr
regular empty file -rw-r--r-- 0 0 0 'file-root'
setfattr: file-root: Permission denied
# file: file-root
# owner: root
# group: root
user::rw-
user:22222:rw-
group::r--
mask::rw-
other::r--

# file: file-root
user.test2="Success"

# file: file-root
# owner: root
# group: root
user::rw-
group::r--
mask::r--
other::r--

setfattr: file-root: Permission denied
# file: file-root
user.test2="Success"

# file: file-root
# owner: root
# group: root
user::rw-
group::r--
group:44444:rw-
mask::rw-
other::r--

# file: file-root
user.test2="Success"
user.test4="Success"

== inheritance / default acl
directory drwxr-xr-x 0 0 0 'dir-root2'
mkdir: cannot create directory 'dir-root2/dir': Permission denied
touch: cannot touch 'dir-root2/dir/file': No such file or directory
# file: dir-root2
# owner: root
# group: root
user::rwx
group::r-x
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

mkdir: cannot create directory 'dir-root2/dir': Permission denied
touch: cannot touch 'dir-root2/dir/file': No such file or directory
# file: dir-root2
# owner: root
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

directory drwxrwxr-x 22222 0 4 'dir-root2/dir'
# file: dir-root2/dir
# owner: 22222
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

regular empty file -rw-rw-r-- 22222 0 0 'dir-root2/dir/file'
# file: dir-root2/dir/file
# owner: 22222
# group: root
user::rw-
user:22222:rwx #effective:rw-
group::r-x #effective:r--
mask::rw-
other::r--

== cleanup
@@ -47,13 +47,9 @@ four
--- dir within dir
--- overwrite file
--- can't overwrite non-empty dir
mv: cannot move '/mnt/test/test/basic-posix-consistency/dir/c/clobber' to '/mnt/test/test/basic-posix-consistency/dir/a/dir': Directory not empty
mv: cannot move ‘/mnt/test/test/basic-posix-consistency/dir/c/clobber’ to ‘/mnt/test/test/basic-posix-consistency/dir/a/dir’: Directory not empty
--- can overwrite empty dir
--- can rename into root
== path resoluion
== inode indexes match after syncing existing
== inode indexes match after copying and syncing
== inode indexes match after removing and syncing
== concurrent creates make one file
one-file
== cleanup
Some files were not shown because too many files have changed in this diff.