Mirror of https://github.com/versity/scoutfs.git
Synced 2026-01-11 06:00:19 +00:00

Compare commits: v1.20...zab/test_l (250 commits)
(Commit listing elided: the rendered compare table carried rows of bare SHA1 hashes, beginning ead8be6b8c and ending 965b692bdc; the author, message, and date cells were lost in extraction.)
@@ -1,6 +1,86 @@
Versity ScoutFS Release Notes
=============================

---
v1.25
\
*Jun 3, 2025*

Fix a bug that could cause indefinite retries of failed client commits.
Under specific error conditions the client's and server's understanding
of the current client commit could get out of sync, and the client would
indefinitely retry commits that could never succeed.  This manifested as
endless "critical transaction commit failure" messages in the kernel log
on the client and matching "error <nr> committing client logs" messages
on the server.

Fix a bug in a specific case of server error handling that could result
in sending references to unwritten blocks to the client.  The client
would try to read blocks that hadn't been written and return spurious
errors.  This was seen under low free space conditions on the server and
resulted in error messages with error code 116 (the errno value for
ESTALE, the client's indication that it couldn't read the blocks it
expected).

---
v1.24
\
*Mar 14, 2025*

Add support for coherent read and write mmap() mappings of regular file
data between mounts.

Fix a bug that was causing scoutfs utilities to parse and change some
file names before passing them on to the kernel for processing.  This
fixes spurious scoutfs command errors for files with the offending
patterns in their names.

Fix a bug where rename wasn't updating the ctime of the inode at the
destination name if it existed.

---
v1.23
\
*Dec 11, 2024*

Add support for kernels in the RHEL 9.5 minor release.

---
v1.22
\
*Nov 1, 2024*

Add support for building against the RHEL9 family of kernels.

Fix failure of the setattr_more ioctl() to set the attributes of a
zero-length file when restoring.

Fix support for POSIX ACLs in the RHEL8 and later family of kernels.

Fix a race condition in the lock server that could drop lock requests
under heavy load and cause cluster lock attempts to hang.

---
v1.21
\
*Jul 1, 2024*

This release adds features that rely on incompatible changes to the
file system structure.  The process of advancing the format version to
enable these features is described in scoutfs(5).

Added the ".indx." extended attribute tag which can be used to determine
the sorting of files in a global index.

Added ScoutFS quotas, which let rules define file size and count limits
in terms of ".totl." extended attribute totals.

Added the project ID file attribute, which is inherited from parent
directories on creation.  ScoutFS quota rules can reference project IDs.

Added a retention attribute for files which prevents modification once
enabled.

---
v1.20
\
@@ -5,13 +5,6 @@ ifeq ($(SK_KSRC),)
SK_KSRC := $(shell echo /lib/modules/`uname -r`/build)
endif

# fail if sparse fails if we find it
ifeq ($(shell sparse && echo found),found)
SP =
else
SP = @:
endif

SCOUTFS_GIT_DESCRIBE ?= \
	$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
		echo no-git)
@@ -36,9 +29,7 @@ TARFILE = scoutfs-kmod-$(RPM_VERSION).tar
all: module

module:
	$(MAKE) $(SCOUTFS_ARGS)
	$(SP) $(MAKE) C=2 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)
	$(MAKE) CHECK=$(CURDIR)/src/sparse-filtered.sh C=1 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)

modules_install:
	$(MAKE) $(SCOUTFS_ARGS) modules_install
@@ -4,17 +4,13 @@
%define kmod_git_describe @@GITDESCRIBE@@
%define pkg_date %(date +%%Y%%m%%d)

# Disable the building of the debug package(s).
%define debug_package %{nil}

# take kernel version or default to uname -r
%{!?kversion: %global kversion %(uname -r)}
%global kernel_version %{kversion}

%if 0%{?el7}
%global kernel_source() /usr/src/kernels/%{kernel_version}.$(arch)
%endif
%if 0%{?el8}
%else
%global kernel_source() /usr/src/kernels/%{kernel_version}
%endif

@@ -22,8 +18,7 @@

%if 0%{?el7}
Name: %{kmod_name}
%endif
%if 0%{?el8}
%else
Name: kmod-%{kmod_name}
%endif
Summary: %{kmod_name} kernel module
@@ -35,8 +30,7 @@ URL: http://scoutfs.org/

%if 0%{?el7}
BuildRequires: %{kernel_module_package_buildreqs}
%endif
%if 0%{?el8}
%else
BuildRequires: elfutils-libelf-devel
%endif
BuildRequires: kernel-devel-uname-r = %{kernel_version}
@@ -54,10 +48,23 @@ Source: %{kmod_name}-kmod-%{kmod_version}.tar
%endif

%global install_mod_dir extra/%{kmod_name}
%if 0%{?el8}

%if ! 0%{?el7}
%global flavors_to_build x86_64
%endif

# el9 sanity: make sure we lock to the minor release we built for and block upgrades
%{lua:
	if string.match(rpm.expand("%{dist}"), "%.el9") then
		rpm.define("el9 1")
	end
}

%if 0%{?el9}
%define release_major_minor 9.%{lua: print(rpm.expand("%{dist}"):match("%.el9_(%d)"))}
Requires: system-release = %{release_major_minor}
%endif

%description
%{kmod_name} - kernel module

@@ -93,7 +100,7 @@ done

# mark modules executable so that strip-to-file can strip them
find %{buildroot} -type f -name \*.ko -exec %{__chmod} u+x \{\} \;

%if 0%{?el8}
%if ! 0%{?el7}
%files
/lib/modules

@@ -111,8 +118,5 @@ SCOUTFS_RPM_NAME=$(rpm -q %{name} | grep "%{version}-%{release}")
rpm -ql $SCOUTFS_RPM_NAME | grep '\.ko$' > /var/run/%{name}-modules-%{version}-%{release} || true

%postun
if [ -x /sbin/weak-modules ]; then
	cat /var/run/%{name}-modules-%{version}-%{release} | /sbin/weak-modules --remove-modules --no-initramfs
fi

cat /var/run/%{name}-modules-%{version}-%{release} | weak-modules --remove-modules --no-initramfs
rm /var/run/%{name}-modules-%{version}-%{release} || true
@@ -9,6 +9,7 @@ CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include

scoutfs-y += \
	acl.o \
	attr_x.o \
	avl.o \
	alloc.o \
	block.o \
@@ -34,6 +35,7 @@ scoutfs-y += \
	options.o \
	per_task.o \
	quorum.o \
	quota.o \
	recov.o \
	scoutfs_trace.o \
	server.o \
@@ -42,10 +44,12 @@ scoutfs-y += \
	srch.o \
	super.o \
	sysfs.o \
	totl.o \
	trans.o \
	triggers.o \
	tseq.o \
	volopt.o \
	wkic.o \
	xattr.o

#
@@ -6,26 +6,6 @@

ccflags-y += -include $(src)/kernelcompat.h

#
# v3.10-rc6-21-gbb6f619b3a49
#
# _readdir changes from fop->readdir() to fop->iterate() and from
# filldir(dirent) to dir_emit(ctx).
#
ifneq (,$(shell grep 'iterate.*dir_context' include/linux/fs.h))
ccflags-y += -DKC_ITERATE_DIR_CONTEXT
endif

#
# v3.10-rc6-23-g5f99f4e79abc
#
# Helpers including dir_emit_dots() are added in the process of
# switching dcache_readdir() from fop->readdir() to fop->iterate()
#
ifneq (,$(shell grep 'dir_emit_dots' include/linux/fs.h))
ccflags-y += -DKC_DIR_EMIT_DOTS
endif

#
# v3.18-rc2-19-gb5ae6b15bd73
#
@@ -78,8 +58,9 @@ endif
# v4.8-rc1-29-g31051c85b5e2
#
# inode_change_ok() removed - replace with setattr_prepare()
# v5.11-rc4-7-g2f221d6f7b88 removes extern attribute
#
ifneq (,$(shell grep 'extern int setattr_prepare' include/linux/fs.h))
ifneq (,$(shell grep 'int setattr_prepare' include/linux/fs.h))
ccflags-y += -DKC_SETATTR_PREPARE
endif

@@ -177,21 +158,12 @@ ifneq (,$(shell grep 'sock_create_kern.*struct net' include/linux/net.h))
ccflags-y += -DKC_SOCK_CREATE_KERN_NET=1
endif

#
# v3.18-rc6-1619-gc0371da6047a
#
# iov_iter is now part of struct msghdr
#
ifneq (,$(shell grep 'struct iov_iter.*msg_iter' include/linux/socket.h))
ccflags-y += -DKC_MSGHDR_STRUCT_IOV_ITER=1
endif

#
# v4.17-rc6-7-g95582b008388
#
# Kernel has current_time(inode) to uniformly retrieve timespec in the right unit
#
ifneq (,$(shell grep 'extern struct timespec64 current_time' include/linux/fs.h))
ifneq (,$(shell grep 'struct timespec64 current_time' include/linux/fs.h))
ccflags-y += -DKC_CURRENT_TIME_INODE=1
endif

@@ -258,3 +230,259 @@ endif
ifneq (,$(shell grep 'static inline const char .xattr_prefix' include/linux/xattr.h))
ccflags-y += -DKC_XATTR_HANDLER_NAME=1
endif

#
# v5.19-rc4-96-g342a72a33407
#
# Adds `typedef __u32 __bitwise blk_opf_t` to aid flag checking
ifneq (,$(shell grep 'typedef __u32 __bitwise blk_opf_t' include/linux/blk_types.h))
ccflags-y += -DKC_HAVE_BLK_OPF_T=1
endif

#
# v5.12-rc6-9-g4f0f586bf0c8
#
# list_sort cmp function takes const list_head args
ifneq (,$(shell grep 'const struct list_head ., const struct list_head .' include/linux/list_sort.h))
ccflags-y += -DKC_LIST_CMP_CONST_ARG_LIST_HEAD
endif

# v5.7-523-g88dca4ca5a93
#
# The pgprot argument to vmalloc is always PAGE_KERNEL, so it is removed.
ifneq (,$(shell grep 'extern void .__vmalloc.unsigned long size, gfp_t gfp_mask, pgprot_t prot' include/linux/vmalloc.h))
ccflags-y += -DKC_VMALLOC_PGPROT_T
endif

# v6.2-rc1-18-g01beba7957a2
#
# fs: port inode_owner_or_capable() to mnt_idmap
ifneq (,$(shell grep 'bool inode_owner_or_capable.struct user_namespace .mnt_userns' include/linux/fs.h))
ccflags-y += -DKC_INODE_OWNER_OR_CAPABLE_USERNS
endif

#
# v5.11-rc4-5-g47291baa8ddf
#
# namei: make permission helpers idmapped mount aware
ifneq (,$(shell grep 'int inode_permission.struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_INODE_PERMISSION_USERNS
endif

#
# v5.11-rc4-24-g549c7297717c
#
# fs: make helpers idmap mount aware
# Enlarges the VFS API methods to include user namespace argument.
ifneq (,$(shell grep 'int ..mknod. .struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_VFS_METHOD_USER_NAMESPACE_ARG
endif

#
# v6.2-rc1-2-gabf08576afe3
#
# fs: vfs methods use struct mnt_idmap instead of struct user_namespace
ifneq (,$(shell grep 'int vfs_mknod.struct mnt_idmap' include/linux/fs.h))
ccflags-y += -DKC_VFS_METHOD_MNT_IDMAP_ARG
endif

#
# v5.17-rc2-21-g07888c665b40
#
# Detect new style bio_alloc - pass bdev and opf.
ifneq (,$(shell grep 'struct bio .bio_alloc.struct block_device .bdev' include/linux/bio.h))
ccflags-y += -DKC_BIO_ALLOC_DEV_OPF_ARGS
endif

#
# v5.7-rc4-53-gcddf8a2c4a82
#
# fiemap_prep() replaces fiemap_check_flags()
ifneq (,$(shell grep -s 'int fiemap_prep.struct inode' include/linux/fiemap.h))
ccflags-y += -DKC_FIEMAP_PREP
endif

#
# v5.17-13043-g800ba29547e1
#
# generic_perform_write args use kiocb for passing filp and pos
ifneq (,$(shell grep 'ssize_t generic_perform_write.struct kiocb ., struct iov_iter' include/linux/fs.h))
ccflags-y += -DKC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
endif

#
# v5.7-rc6-2496-g76ee0785f42a
#
# net: add sock_set_sndtimeo
ifneq (,$(shell grep 'void sock_set_sndtimeo.struct sock' include/net/sock.h))
ccflags-y += -DKC_SOCK_SET_SNDTIMEO
endif

#
# v5.8-rc4-1931-gba423fdaa589
#
# setsockopt functions are now passed a sockptr_t value instead of char*
ifneq (,$(shell grep -s 'include .linux/sockptr.h.' include/linux/net.h))
ccflags-y += -DKC_SETSOCKOPT_SOCKPTR_T
endif

#
# v5.7-rc6-2507-g71c48eb81c9e
#
# Adds a bunch of low level TCP sock parameter functions that we want to use.
ifneq (,$(shell grep 'int tcp_sock_set_keepintvl' include/linux/tcp.h))
ccflags-y += -DKC_HAVE_TCP_SET_SOCKFN
endif

#
# v4.16-rc3-13-ga84d1169164b
#
# Fixes y2038 issues with struct timeval.
ifneq (,$(shell grep -s '^struct __kernel_old_timeval .' include/uapi/linux/time_types.h))
ccflags-y += -DKC_KERNEL_OLD_TIMEVAL_STRUCT
endif

#
# v5.19-rc4-52-ge33c267ab70d
#
# register_shrinker now requires a name, used for debug stats etc.
ifneq (,$(shell grep 'int __printf.*register_shrinker.struct shrinker .shrinker,' include/linux/shrinker.h))
ccflags-y += -DKC_SHRINKER_NAME
endif

#
# v5.18-rc5-246-gf132ab7d3ab0
#
# mpage_readpage() is now replaced with mpage_read_folio.
ifneq (,$(shell grep 'int mpage_read_folio.struct folio .folio' include/linux/mpage.h))
ccflags-y += -DKC_MPAGE_READ_FOLIO
endif

#
# v5.18-rc5-219-gb3992d1e2ebc
#
# block_write_begin() is no longer passed aop_flags
ifneq (,$(shell grep -C1 'int block_write_begin' include/linux/buffer_head.h | tail -n 2 | grep 'unsigned flags'))
ccflags-y += -DKC_BLOCK_WRITE_BEGIN_AOP_FLAGS
endif

#
# v6.0-rc6-9-g863f144f12ad
#
# the .tmpfile() vfs method calling convention changed and now a struct
# file* is passed to this method instead of a dentry.  The function also
# should open the created file and call finish_open_simple() before returning.
ifneq (,$(shell grep 'extern void d_tmpfile.struct dentry' include/linux/dcache.h))
ccflags-y += -DKC_D_TMPFILE_DENTRY
endif

#
# v6.4-rc2-201-g0733ad800291
#
# New blk_mode_t replaces abuse of fmode_t
ifneq (,$(shell grep 'typedef unsigned int __bitwise blk_mode_t' include/linux/blkdev.h))
ccflags-y += -DKC_HAVE_BLK_MODE_T
endif

#
# v6.4-rc2-186-g2736e8eeb0cc
#
# Reworks FMODE_EXCL kludge and instead modifies the blkdev_put() call to pass in
# the (exclusive) holder to implement FMODE_EXCL handling.
ifneq (,$(shell grep 'blkdev_put.struct block_device .bdev, void .holder' include/linux/blkdev.h))
ccflags-y += -DKC_BLKDEV_PUT_HOLDER_ARG
endif

#
# v6.4-rc4-163-g0d625446d0a4
#
# Entirely removes current->backing_dev_info to ultimately remove buffer_head
# completely at some point.
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info;' include/linux/sched.h))
ccflags-y += -DKC_CURRENT_BACKING_DEV_INFO
endif

#
# v6.8-rc1-4-gf3a608827d1f
#
# adds bdev_file_open_by_path() and later in v6.8-rc1-30-ge97d06a46526 removes bdev_open_by_path()
# which requires us to use the file method from now on.
ifneq (,$(shell grep 'struct file.*bdev_file_open_by_path.const char.*path' include/linux/blkdev.h))
ccflags-y += -DKC_BDEV_FILE_OPEN_BY_PATH
endif

# v4.0-rc7-1796-gfe0f07d08ee3
#
# direct-io changes modify inode_dio_done to now be called inode_dio_end
ifneq (,$(shell grep 'void inode_dio_end.struct inode' include/linux/fs.h))
ccflags-y += -DKC_INODE_DIO_END
endif

#
# v5.0-6476-g3d3539018d2c
#
# page fault handlers return a bitmask vm_fault_t instead
# Note: el8's header has a slightly modified prefix here
ifneq (,$(shell grep 'typedef.*__bitwise unsigned.*int vm_fault_t' include/linux/mm_types.h))
ccflags-y += -DKC_MM_VM_FAULT_T
endif

# v3.19-499-gd83a08db5ba6
#
# .remap_pages becomes obsolete
ifneq (,$(shell grep 'int ..remap_pages..struct vm_area_struct' include/linux/mm.h))
ccflags-y += -DKC_MM_REMAP_PAGES
endif

#
# v3.19-4742-g503c358cf192
#
# list_lru_shrink_count() and list_lru_shrink_walk() introduced
#
ifneq (,$(shell grep 'list_lru_shrink_count.*struct list_lru' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_SHRINK_COUNT_WALK
endif

#
# v3.19-4757-g3f97b163207c
#
# lru_list_walk_cb lru arg added
#
ifneq (,$(shell grep 'struct list_head \*item, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_WALK_CB_ITEM_LOCK
endif

#
# v6.7-rc4-153-g0a97c01cd20b
#
# list_lru_{add,del} -> list_lru_{add,del}_obj
#
ifneq (,$(shell grep '^bool list_lru_add_obj' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_ADD_OBJ
endif

#
# v6.12-rc6-227-gda0c02516c50
#
# lru_list_walk_cb lock arg removed
#
ifneq (,$(shell grep 'struct list_lru_one \*list, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
ccflags-y += -DKC_LIST_LRU_WALK_CB_LIST_LOCK
endif

#
# v5.1-rc4-273-ge9b98e162aa5
#
# introduce stack trace helpers
#
ifneq (,$(shell grep '^unsigned int stack_trace_save' include/linux/stacktrace.h))
ccflags-y += -DKC_STACK_TRACE_SAVE
endif

# v6.1-rc1-4-g7420332a6ff4
#
# .get_acl() method now has dentry arg (and mnt_idmap).  The old get_acl has been renamed
# to get_inode_acl() and is still available as well, but has an extra rcu param.
ifneq (,$(shell grep 'struct posix_acl ...get_acl..struct mnt_idmap ., struct dentry' include/linux/fs.h))
ccflags-y += -DKC_GET_ACL_DENTRY
endif
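Each probe above turns a grep of the installed kernel headers into a -DKC_* compile flag; kernelcompat.h, pulled in through the ccflags-y include at the top but not itself part of this diff, keys small compatibility shims off those flags. A minimal sketch of that pattern, with a hypothetical wrapper name (setattr_prepare() and inode_change_ok() are the real old/new kernel interfaces named in the probe comments):

/*
 * Hypothetical sketch of how kernelcompat.h might consume one of the
 * KC_ flags probed above; the kc_ wrapper name is made up for
 * illustration and is not from this diff.
 */
#ifdef KC_SETATTR_PREPARE
#define kc_setattr_prepare(dentry, attr) \
	setattr_prepare(dentry, attr)
#else
/* older kernels spell the same permission check inode_change_ok() */
#define kc_setattr_prepare(dentry, attr) \
	inode_change_ok((dentry)->d_inode, attr)
#endif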
@@ -98,19 +98,24 @@ struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct s
		acl = ERR_PTR(ret);
	}

#ifndef KC___POSIX_ACL_CREATE
	/* can set null negative cache */
	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);
#endif

	kfree(value);

	return acl;
}

#ifdef KC_GET_ACL_DENTRY
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF
				  struct dentry *dentry, int type)
{
	struct inode *inode = dentry->d_inode;
#else
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type)
{
#endif
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *lock = NULL;
	struct posix_acl *acl;
@@ -155,7 +160,8 @@ int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
	switch (type) {
	case ACL_TYPE_ACCESS:
		if (acl) {
			ret = posix_acl_update_mode(inode, &new_mode, &acl);
			ret = posix_acl_update_mode(KC_VFS_INIT_NS
						    inode, &new_mode, &acl);
			if (ret < 0)
				goto out;
			set_mode = true;
@@ -194,18 +200,23 @@ int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
	}

out:
#ifndef KC___POSIX_ACL_CREATE
	if (!ret)
		set_cached_acl(inode, type, acl);
#endif

	kfree(value);

	return ret;
}

#ifdef KC_GET_ACL_DENTRY
int scoutfs_set_acl(KC_VFS_NS_DEF
		    struct dentry *dentry, struct posix_acl *acl, int type)
{
	struct inode *inode = dentry->d_inode;
#else
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
#endif
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
@@ -243,7 +254,12 @@ int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value,
	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

#ifdef KC_GET_ACL_DENTRY
	acl = scoutfs_get_acl(KC_VFS_INIT_NS
			      dentry, type);
#else
	acl = scoutfs_get_acl(dentry->d_inode, type);
#endif
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
@@ -256,7 +272,9 @@ int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value,
}

#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_set_xattr(const struct xattr_handler *handler, struct dentry *dentry,
int scoutfs_acl_set_xattr(const struct xattr_handler *handler,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags)
{
@@ -269,7 +287,7 @@ int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *v
	struct posix_acl *acl = NULL;
	int ret;

	if (!inode_owner_or_capable(dentry->d_inode))
	if (!inode_owner_or_capable(KC_VFS_INIT_NS dentry->d_inode))
		return -EPERM;

	if (!IS_POSIXACL(dentry->d_inode))
@@ -287,7 +305,11 @@ int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *v
		}
	}

#ifdef KC_GET_ACL_DENTRY
	ret = scoutfs_set_acl(KC_VFS_INIT_NS dentry, acl, type);
#else
	ret = scoutfs_set_acl(dentry->d_inode, acl, type);
#endif
out:
	posix_acl_release(acl);

@@ -1,16 +1,23 @@
#ifndef _SCOUTFS_ACL_H_
#define _SCOUTFS_ACL_H_

#ifdef KC_GET_ACL_DENTRY
struct posix_acl *scoutfs_get_acl(KC_VFS_NS_DEF struct dentry *dentry, int type);
int scoutfs_set_acl(KC_VFS_NS_DEF struct dentry *dentry, struct posix_acl *acl, int type);
#else
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type);
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
#endif
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
			   struct scoutfs_lock *lock, struct list_head *ind_locks);
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_get_xattr(const struct xattr_handler *, struct dentry *dentry,
			  struct inode *inode, const char *name, void *value,
			  size_t size);
int scoutfs_acl_set_xattr(const struct xattr_handler *, struct dentry *dentry,
int scoutfs_acl_set_xattr(const struct xattr_handler *,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags);
#else
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/random.h>

@@ -85,18 +86,47 @@ static u64 smallest_order_length(u64 len)
}

/*
 * An extent modification dirties three distinct leaves of an allocator
 * btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent.  Dirtying the paths to these
 * leaves can grow the tree and grow/shrink neighbours at each level.
 * We over-estimate the number of blocks allocated and freed (the paths
 * share a root, growth doesn't free) to err on the simpler and safer
 * side.  The overhead is minimal given the relatively large list blocks
 * and relatively short allocator trees.
 * Moving an extent between trees can dirty blocks in several ways.  This
 * function calculates the worst case number of blocks across these scenarios.
 * We treat the alloc and free counts independently, so the values below are
 * max(allocated, freed), not the sum.
 *
 * We track extents with two separate btree items: by block number and by size.
 *
 * If we're removing an extent from the btree (allocating), we can dirty
 * two blocks if the keys are in different leaves.  If we wind up merging
 * leaves because we fall below the low water mark, we can wind up freeing
 * three leaves.
 *
 * That sequence is as follows, assuming the original keys are removed from
 * blocks A and B:
 *
 *	Allocate new dirty A' and B'
 *	Free old stable A and B
 *	B' has fallen below the low water mark, so copy B' into A'
 *	Free B'
 *
 * An extent insertion (freeing an extent) can dirty up to five distinct items
 * in the btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent:
 *
 * In the by-blkno portion of the btree, we can dirty (allocate for COW) up
 * to two blocks: either by merging adjacent extents, which can cause us to
 * join leaf blocks, or by an insertion that causes a split.
 *
 * In the by-size portion, we never merge extents, so normally we just dirty
 * a single item with a size insertion.  But if we merged adjacent extents in
 * the by-blkno portion of the tree, we might be working with three by-size
 * items: removing the two old ones that were combined in the merge; and
 * adding the new one for the larger, merged size.
 *
 * Finally, dirtying the paths to these leaves can grow the tree and grow/shrink
 * neighbours at each level, so we multiply by the height of the tree after
 * accounting for a possible new level.
 */
static u32 extent_mod_blocks(u32 height)
{
	return ((1 + height) * 2) * 3;
	return ((1 + height) * 3) * 5;
}

/*
@@ -827,7 +857,7 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
		.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext;
	struct scoutfs_extent ext = {0,};
	u64 start;
	u64 len;
	int nr;

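To make the revised reservation bound in extent_mod_blocks() above concrete (an illustrative calculation, not part of the diff): the 1 + height term allows for the tree gaining a level, so for a tree of height 3 the worst-case estimate grows from ((1 + 3) * 2) * 3 = 24 dirtied blocks under the old formula to ((1 + 3) * 3) * 5 = 60 under the new one, matching the larger per-modification item counts the comment describes.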
kmod/src/attr_x.c (new file, 252 lines)
@@ -0,0 +1,252 @@
/*
 * Copyright (C) 2024 Versity Software, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>

#include "format.h"
#include "super.h"
#include "inode.h"
#include "ioctl.h"
#include "lock.h"
#include "trans.h"
#include "attr_x.h"

static int validate_attr_x_input(struct super_block *sb, struct scoutfs_ioctl_inode_attr_x *iax)
{
	int ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX__UNKNOWN) ||
	    (iax->x_flags & SCOUTFS_IOC_IAX_F__UNKNOWN))
		return -EINVAL;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_RETENTION)))
		return ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID)))
		return ret;

	return 0;
}

/*
 * If the mask indicates interest in the given attr then set the field
 * to the caller's value and return the new size if it didn't already
 * include the attr field.
 */
#define fill_attr(size, iax, bit, field, val)					\
({										\
	__typeof__(iax) _iax = (iax);						\
	__typeof__(size) _size = (size);					\
										\
	if (_iax->x_mask & (bit)) {						\
		_iax->field = (val);						\
		_size = max(_size, offsetof(struct scoutfs_ioctl_inode_attr_x, field) + \
			    sizeof_field(struct scoutfs_ioctl_inode_attr_x, field)); \
	}									\
										\
	_size;									\
})

/*
 * Returns -errno on error, or the number (>= 0) of bytes filled in the
 * response.  0 can be returned if no attributes are requested in the
 * input x_mask.
 */
int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	size_t size = 0;
	u64 offline;
	u64 online;
	u64 bits;
	int ret;

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_META_SEQ,
			 meta_seq, scoutfs_inode_meta_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_SEQ,
			 data_seq, scoutfs_inode_data_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_VERSION,
			 data_version, scoutfs_inode_data_version(inode));
	if (iax->x_mask & (SCOUTFS_IOC_IAX_ONLINE_BLOCKS | SCOUTFS_IOC_IAX_OFFLINE_BLOCKS)) {
		scoutfs_inode_get_onoff(inode, &online, &offline);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_ONLINE_BLOCKS,
				 online_blocks, online);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_OFFLINE_BLOCKS,
				 offline_blocks, offline);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_sec, inode->i_ctime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_nsec, inode->i_ctime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_sec, si->crtime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_nsec, si->crtime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_SIZE, size, i_size_read(inode));
	if (iax->x_mask & SCOUTFS_IOC_IAX__BITS) {
		bits = 0;
		if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
		    (scoutfs_inode_get_flags(inode) & SCOUTFS_INO_FLAG_RETENTION))
			bits |= SCOUTFS_IOC_IAX_B_RETENTION;
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX__BITS, bits, bits);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_PROJECT_ID,
			 project_id, scoutfs_inode_get_proj(inode));

	ret = size;
unlock:
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
	inode_unlock(inode);
out:
	return ret;
}

static bool valid_attr_changes(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	/* provided data_version must be non-zero */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version == 0))
		return false;

	/* can only set size or data version in new regular files */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) ||
	     (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)) &&
	    (!S_ISREG(inode->i_mode) || scoutfs_inode_data_version(inode) != 0))
		return false;

	/* must provide non-zero data_version with non-zero size */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) && (iax->size > 0)) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) || (iax->data_version == 0)))
		return false;

	/* must provide non-zero size when setting offline extents to that size */
	if ((iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_SIZE) || (iax->size == 0)))
		return false;

	/* the retention bit only applies to regular files */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) && !S_ISREG(inode->i_mode))
		return false;

	return true;
}

int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
	bool set_data_seq;
	int ret;

	/* initially all setting is root only, could loosen with finer grained checks */
	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	/* check for errors before making any changes */
	if (!valid_attr_changes(inode, iax)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* retention prevents modification unless also clearing retention */
	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0 && !((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
			 !(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION)))
		goto unlock;

	/* setting only so we don't see 0 data seq with nonzero data_version */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version > 0))
		set_data_seq = true;
	else
		set_data_seq = false;

	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq, true);
	if (ret)
		goto unlock;

	ret = scoutfs_dirty_inode_item(inode, lock);
	if (ret < 0)
		goto release;

	/* create the offline extent first, it might fail */
	if (iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) {
		ret = scoutfs_data_init_offline_extent(inode, iax->size, lock);
		if (ret)
			goto release;
	}

	/* make all changes once they're all checked and will succeed */
	if (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)
		scoutfs_inode_set_data_version(inode, iax->data_version);
	if (iax->x_mask & SCOUTFS_IOC_IAX_SIZE)
		i_size_write(inode, iax->size);
	if (iax->x_mask & SCOUTFS_IOC_IAX_CTIME) {
		inode->i_ctime.tv_sec = iax->ctime_sec;
		inode->i_ctime.tv_nsec = iax->ctime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_CRTIME) {
		si->crtime.tv_sec = iax->crtime_sec;
		si->crtime.tv_nsec = iax->crtime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) {
		scoutfs_inode_set_flags(inode, ~SCOUTFS_INO_FLAG_RETENTION,
					(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION) ?
						SCOUTFS_INO_FLAG_RETENTION : 0);
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID)
		scoutfs_inode_set_proj(inode, iax->project_id);

	scoutfs_update_inode_item(inode, lock, &ind_locks);
	ret = 0;
release:
	scoutfs_release_trans(sb);
unlock:
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
	inode_unlock(inode);
out:
	return ret;
}
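For orientation, a hypothetical userspace sketch of reading one attribute through this interface. The ioctl request macro (spelled SCOUTFS_IOC_GET_ATTR_X here) and the struct and mask definitions live in the scoutfs ioctl UAPI header, which is not shown in this diff, so both names are assumptions:

/*
 * Hypothetical sketch only: assumes the scoutfs ioctl UAPI header is
 * included to provide struct scoutfs_ioctl_inode_attr_x, the
 * SCOUTFS_IOC_IAX_* masks, and the request macro guessed at below.
 */
#include <stdio.h>
#include <sys/ioctl.h>

static int print_data_version(int fd)
{
	struct scoutfs_ioctl_inode_attr_x iax = { 0 };
	int ret;

	/* ask only for the data_version field */
	iax.x_mask = SCOUTFS_IOC_IAX_DATA_VERSION;

	/* on success the return value is the number of response bytes filled */
	ret = ioctl(fd, SCOUTFS_IOC_GET_ATTR_X, &iax);
	if (ret < 0)
		return ret;

	printf("data_version: %llu\n", (unsigned long long)iax.data_version);
	return 0;
}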
kmod/src/attr_x.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef _SCOUTFS_ATTR_X_H_
#define _SCOUTFS_ATTR_X_H_

#include <linux/kernel.h>
#include <linux/fs.h>
#include "ioctl.h"

int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);
int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);

#endif
kmod/src/block.c (490 lines changed)
@@ -22,6 +22,8 @@
#include <linux/rhashtable.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>
#include <linux/stacktrace.h>

#include "format.h"
#include "super.h"
@@ -38,26 +40,12 @@
 * than the page size.  Callers can have their own contexts for tracking
 * dirty blocks that are written together.  We pin dirty blocks in
 * memory and only checksum them all as they're all written.
 *
 * Memory reclaim is driven by maintaining two very coarse groups of
 * blocks.  As we access blocks we mark them with an increasing counter
 * to discourage them from being reclaimed.  We then define a threshold
 * at the current counter minus half the population.  Recent blocks have
 * a counter greater than the threshold, and all other blocks with
 * counters less than it are considered older and are candidates for
 * reclaim.  This results in access updates rarely modifying an atomic
 * counter as blocks need to be moved into the recent group, and shrink
 * can randomly scan blocks looking for the half of the population that
 * will be in the old group.  It's reasonably effective, but is
 * particularly efficient and avoids contention between concurrent
 * accesses and shrinking.
 */

struct block_info {
	struct super_block *sb;
	atomic_t total_inserted;
	atomic64_t access_counter;
	struct rhashtable ht;
	struct list_lru lru;
	wait_queue_head_t waitq;
	KC_DEFINE_SHRINKER(shrinker);
	struct work_struct free_work;
@@ -76,28 +64,15 @@ enum block_status_bits {
	BLOCK_BIT_PAGE_ALLOC,	/* page (possibly high order) allocation */
	BLOCK_BIT_VIRT,		/* mapped virt allocation */
	BLOCK_BIT_CRC_VALID,	/* crc has been verified */
	BLOCK_BIT_ACCESSED,	/* seen by lookup since last lru add/walk */
};

/*
 * We want to tie atomic changes in refcounts to whether or not the
 * block is still visible in the hash table, so we store the hash
 * table's reference up at a known high bit.  We could naturally set the
 * inserted bit through excessive refcount increments.  We don't do
 * anything about that but at least warn if we get close.
 *
 * We're avoiding the high byte for no real good reason, just out of a
 * historical fear of implementations that don't provide the full
 * precision.
 */
#define BLOCK_REF_INSERTED	(1U << 23)
#define BLOCK_REF_FULL		(BLOCK_REF_INSERTED >> 1)

struct block_private {
	struct scoutfs_block bl;
	struct super_block *sb;
	atomic_t refcount;
	u64 accessed;
	struct rhash_head ht_head;
	struct list_head lru_head;
	struct list_head dirty_entry;
	struct llist_node free_node;
	unsigned long bits;
@@ -106,13 +81,15 @@ struct block_private {
		struct page *page;
		void *virt;
	};
	unsigned int stack_len;
	unsigned long stack[10];
};

#define TRACE_BLOCK(which, bp) \
do { \
	__typeof__(bp) _bp = (bp); \
	trace_scoutfs_block_##which(_bp->sb, _bp, _bp->bl.blkno, atomic_read(&_bp->refcount), \
				    atomic_read(&_bp->io_count), _bp->bits, _bp->accessed); \
				    atomic_read(&_bp->io_count), _bp->bits); \
} while (0)

#define BLOCK_PRIVATE(_bl) \
@@ -120,14 +97,23 @@

static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
{
	int off = offsetof(struct scoutfs_block_header, crc) +
		  FIELD_SIZEOF(struct scoutfs_block_header, crc);
	int off = offsetofend(struct scoutfs_block_header, crc);
	u32 calc = crc32c(~0, (char *)hdr + off, size - off);

	return cpu_to_le32(calc);
}

static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
static noinline void save_block_stack(struct block_private *bp)
{
	bp->stack_len = stack_trace_save(bp->stack, ARRAY_SIZE(bp->stack), 2);
}

static void print_block_stack(struct block_private *bp)
{
	stack_trace_print(bp->stack, bp->stack_len, 1);
}

static noinline struct block_private *block_alloc(struct super_block *sb, u64 blkno)
{
	struct block_private *bp;
	unsigned int nofs_flags;
@@ -159,7 +145,7 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
	 */
	lockdep_off();
	nofs_flags = memalloc_nofs_save();
	bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
	bp->virt = kc__vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM);
	memalloc_nofs_restore(nofs_flags);
	lockdep_on();

@@ -177,11 +163,13 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
	bp->bl.blkno = blkno;
	bp->sb = sb;
	atomic_set(&bp->refcount, 1);
	INIT_LIST_HEAD(&bp->lru_head);
	INIT_LIST_HEAD(&bp->dirty_entry);
	set_bit(BLOCK_BIT_NEW, &bp->bits);
	atomic_set(&bp->io_count, 0);

	TRACE_BLOCK(allocate, bp);
	save_block_stack(bp);

out:
	if (!bp)
@@ -234,32 +222,85 @@ static void block_free_work(struct work_struct *work)
}

/*
 * Get a reference to a block while holding an existing reference.
 * Users of blocks hold a refcount.  If putting a refcount drops to zero
 * then the block is freed.
 *
 * Acquiring new references and claiming the exclusive right to tear
 * down a block is built around this LIVE_REFCOUNT_BASE refcount value.
 * As blocks are initially cached they have the live base added to their
 * refcount.  Lookups will only increment the refcount and return blocks
 * for reference holders while the refcount is >= the base.
 *
 * To remove a block from the cache and eventually free it, either by
 * the lru walk in the shrinker, or by reference holders, the live base
 * is removed and turned into a normal refcount increment that will be
 * put by the caller.  This can only be done once for a block, and once
 * it's done lookup will not return any more references.
 */
#define LIVE_REFCOUNT_BASE	(INT_MAX ^ (INT_MAX >> 1))

/*
 * Inc the refcount while holding an incremented refcount.  We can't
 * have so many individual reference holders that they pass the live
 * base.
 */
static void block_get(struct block_private *bp)
{
	WARN_ON_ONCE((atomic_read(&bp->refcount) & ~BLOCK_REF_INSERTED) <= 0);
	int now = atomic_inc_return(&bp->refcount);

	atomic_inc(&bp->refcount);
	BUG_ON(now <= 1);
	BUG_ON(now == LIVE_REFCOUNT_BASE);
}

/*
 * Get a reference to a block as long as it's been inserted in the hash
 * table and hasn't been removed.
 */
static struct block_private *block_get_if_inserted(struct block_private *bp)
 *	if (*v >= u) {
 *		*v += a;
 *		return true;
 *	}
 */
static bool atomic_add_unless_less(atomic_t *v, int a, int u)
{
	int cnt;
	int c;

	do {
		cnt = atomic_read(&bp->refcount);
		WARN_ON_ONCE(cnt & BLOCK_REF_FULL);
		if (!(cnt & BLOCK_REF_INSERTED))
			return NULL;
		c = atomic_read(v);
		if (c < u)
			return false;
	} while (atomic_cmpxchg(v, c, c + a) != c);

	} while (atomic_cmpxchg(&bp->refcount, cnt, cnt + 1) != cnt);
	return true;
}

	return bp;
static bool block_get_if_live(struct block_private *bp)
{
	return atomic_add_unless_less(&bp->refcount, 1, LIVE_REFCOUNT_BASE);
}

/*
 * If the refcount still has the live base, subtract it and increment
 * the caller's refcount that they'll put.
 */
static bool block_get_remove_live(struct block_private *bp)
{
	return atomic_add_unless_less(&bp->refcount, (1 - LIVE_REFCOUNT_BASE), LIVE_REFCOUNT_BASE);
}

/*
 * Only get the live base refcount if it is the only refcount remaining.
 * This means that there are no active refcount holders and the block
 * can't be dirty or under IO, which both hold references.
 */
static bool block_get_remove_live_only(struct block_private *bp)
{
	int c;

	do {
		c = atomic_read(&bp->refcount);
		if (c != LIVE_REFCOUNT_BASE)
			return false;
	} while (atomic_cmpxchg(&bp->refcount, c, c - LIVE_REFCOUNT_BASE + 1) != c);

	return true;
}

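To put numbers on the live-base scheme above (an illustrative walk-through, not part of the diff): LIVE_REFCOUNT_BASE, INT_MAX ^ (INT_MAX >> 1), is bit 30, 0x40000000. A cached block with no active users sits at exactly the base. block_get_if_live() succeeds only while the count is still at or above the base, bumping base + n to base + n + 1. Whichever of the shrinker or a reference holder wins block_get_remove_live() adds 1 - base, turning base + n into n + 1 plain references; from then on the count is below the base, so further lookups fail, and the final put of those n + 1 references frees the block.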
/*
|
||||
@@ -291,104 +332,73 @@ static const struct rhashtable_params block_ht_params = {
|
||||
};
|
||||
|
||||
/*
|
||||
* Insert a new block into the hash table. Once it is inserted in the
|
||||
* hash table readers can start getting references. The caller may have
|
||||
* multiple refs but the block can't already be inserted.
|
||||
* Insert the block into the cache so that it's visible for lookups.
|
||||
* The caller can hold references (including for a dirty block).
|
||||
*
|
||||
* We make sure the base is added and the block is in the lru once it's
|
||||
* in the hash. If hash table insertion fails it'll be briefly visible
|
||||
* in the lru, but won't be isolated/evicted because we hold an
|
||||
* incremented refcount in addition to the live base.
|
||||
*/
|
||||
static int block_insert(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
int ret;
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount) & BLOCK_REF_INSERTED);
|
||||
|
||||
BUG_ON(atomic_read(&bp->refcount) >= LIVE_REFCOUNT_BASE);
|
||||
atomic_add(LIVE_REFCOUNT_BASE, &bp->refcount);
|
||||
smp_mb__after_atomic(); /* make sure live base is visible to list_lru walk */
|
||||
list_lru_add_obj(&binf->lru, &bp->lru_head);
|
||||
retry:
|
||||
atomic_add(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
ret = rhashtable_lookup_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
if (ret < 0) {
|
||||
atomic_sub(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
if (ret == -EBUSY) {
|
||||
/* wait for pending rebalance to finish */
|
||||
synchronize_rcu();
|
||||
goto retry;
|
||||
} else {
|
||||
atomic_sub(LIVE_REFCOUNT_BASE, &bp->refcount);
|
||||
BUG_ON(atomic_read(&bp->refcount) >= LIVE_REFCOUNT_BASE);
|
||||
list_lru_del_obj(&binf->lru, &bp->lru_head);
|
||||
}
|
||||
} else {
|
||||
atomic_inc(&binf->total_inserted);
|
||||
TRACE_BLOCK(insert, bp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u64 accessed_recently(struct block_info *binf)
|
||||
{
|
||||
return atomic64_read(&binf->access_counter) - (atomic_read(&binf->total_inserted) >> 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure that a block that is being accessed is less likely to be
|
||||
* reclaimed if it is seen by the shrinker. If the block hasn't been
|
||||
* accessed recently we update its accessed value.
|
||||
* Indicate to the lru walker that this block has been accessed since it
|
||||
* was added or last walked.
|
||||
*/
|
||||
static void block_accessed(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
|
||||
if (bp->accessed == 0 || bp->accessed < accessed_recently(binf)) {
|
||||
if (!test_and_set_bit(BLOCK_BIT_ACCESSED, &bp->bits))
|
||||
scoutfs_inc_counter(sb, block_cache_access_update);
|
||||
bp->accessed = atomic64_inc_return(&binf->access_counter);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller wants to remove the block from the hash table and has an
|
||||
* idea what the refcount should be. If the refcount does still
|
||||
* indicate that the block is hashed, and we're able to clear that bit,
|
||||
* then we can remove it from the hash table.
|
||||
* Remove the block from the cache. When this returns the block won't
|
||||
* be visible for additional references from lookup.
|
||||
*
|
||||
* The caller makes sure that it's safe to be referencing this block,
|
||||
* either with their own held reference (most everything) or by being in
|
||||
* an rcu grace period (shrink).
|
||||
*/
|
||||
static bool block_remove_cnt(struct super_block *sb, struct block_private *bp, int cnt)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
int ret;
|
||||
|
||||
if ((cnt & BLOCK_REF_INSERTED) &&
|
||||
(atomic_cmpxchg(&bp->refcount, cnt, cnt & ~BLOCK_REF_INSERTED) == cnt)) {
|
||||
|
||||
TRACE_BLOCK(remove, bp);
|
||||
ret = rhashtable_remove_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
WARN_ON_ONCE(ret); /* must have been inserted */
|
||||
atomic_dec(&binf->total_inserted);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}

/*
 * Try to remove the block from the hash table as long as the refcount
 * indicates that it is still in the hash table.  This can be racing
 * with normal refcount changes so it might have to retry.
 * We always try and remove from the hash table.  It's safe to remove a
 * block that isn't hashed, it just returns -ENOENT.
 *
 * This is racing with the lru walk in the shrinker also trying to
 * remove idle blocks from the cache.  They both try to remove the live
 * refcount base and perform their removal and put if they get it.
 */
static void block_remove(struct super_block *sb, struct block_private *bp)
{
        int cnt;
        DECLARE_BLOCK_INFO(sb, binf);

        do {
                cnt = atomic_read(&bp->refcount);
        } while ((cnt & BLOCK_REF_INSERTED) && !block_remove_cnt(sb, bp, cnt));
}
        rhashtable_remove_fast(&binf->ht, &bp->ht_head, block_ht_params);

/*
 * Take one shot at removing the block from the hash table if it's still
 * in the hash table and the caller has the only other reference.
 */
static bool block_remove_solo(struct super_block *sb, struct block_private *bp)
{
        return block_remove_cnt(sb, bp, BLOCK_REF_INSERTED | 1);
        if (block_get_remove_live(bp)) {
                list_lru_del_obj(&binf->lru, &bp->lru_head);
                block_put(sb, bp);
        }
}

static bool io_busy(struct block_private *bp)
@@ -397,37 +407,6 @@ static bool io_busy(struct block_private *bp)
        return test_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
}

/*
 * Called during shutdown with no other users.
 */
static void block_remove_all(struct super_block *sb)
{
        DECLARE_BLOCK_INFO(sb, binf);
        struct rhashtable_iter iter;
        struct block_private *bp;

        rhashtable_walk_enter(&binf->ht, &iter);
        rhashtable_walk_start(&iter);

        for (;;) {
                bp = rhashtable_walk_next(&iter);
                if (bp == NULL)
                        break;
                if (bp == ERR_PTR(-EAGAIN))
                        continue;

                if (block_get_if_inserted(bp)) {
                        block_remove(sb, bp);
                        WARN_ON_ONCE(atomic_read(&bp->refcount) != 1);
                        block_put(sb, bp);
                }
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);

        WARN_ON_ONCE(atomic_read(&binf->total_inserted) != 0);
}

/*
 * XXX The io_count and sb fields in the block_private are only used
@@ -438,7 +417,7 @@ static void block_remove_all(struct super_block *sb)
 * possible.  Final freeing, verifying checksums, and unlinking errored
 * blocks are all done by future users of the blocks.
 */
static void block_end_io(struct super_block *sb, unsigned int opf,
static void block_end_io(struct super_block *sb, blk_opf_t opf,
                         struct block_private *bp, int err)
{
        DECLARE_BLOCK_INFO(sb, binf);
@@ -478,7 +457,7 @@ static void KC_DECLARE_BIO_END_IO(block_bio_end_io, struct bio *bio)
 * Kick off IO for a single block.
 */
static int block_submit_bio(struct super_block *sb, struct block_private *bp,
                            unsigned int opf)
                            blk_opf_t opf)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct bio *bio = NULL;
@@ -489,7 +468,7 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
        int ret = 0;

        if (scoutfs_forcing_unmount(sb))
                return -EIO;
                return -ENOLINK;

        sector = bp->bl.blkno << (SCOUTFS_BLOCK_LG_SHIFT - 9);

@@ -505,15 +484,13 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,

        for (off = 0; off < SCOUTFS_BLOCK_LG_SIZE; off += PAGE_SIZE) {
                if (!bio) {
                        bio = bio_alloc(GFP_NOFS, SCOUTFS_BLOCK_LG_PAGES_PER);
                        bio = kc_bio_alloc(sbi->meta_bdev, SCOUTFS_BLOCK_LG_PAGES_PER, opf, GFP_NOFS);
                        if (!bio) {
                                ret = -ENOMEM;
                                break;
                        }

                        kc_bio_set_opf(bio, opf);
                        kc_bio_set_sector(bio, sector + (off >> 9));
                        bio_set_dev(bio, sbi->meta_bdev);
                        bio->bi_end_io = block_bio_end_io;
                        bio->bi_private = bp;

@@ -546,6 +523,10 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
        return ret;
}

/*
 * Return a block with an elevated refcount if it was present in the
 * hash table and its refcount didn't indicate that it was being freed.
 */
static struct block_private *block_lookup(struct super_block *sb, u64 blkno)
{
        DECLARE_BLOCK_INFO(sb, binf);
@@ -553,8 +534,8 @@ static struct block_private *block_lookup(struct super_block *sb, u64 blkno)

        rcu_read_lock();
        bp = rhashtable_lookup(&binf->ht, &blkno, block_ht_params);
        if (bp)
                bp = block_get_if_inserted(bp);
        if (bp && !block_get_if_live(bp))
                bp = NULL;
        rcu_read_unlock();

        return bp;

@@ -683,6 +664,7 @@ int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref
        struct scoutfs_block_header *hdr;
        struct block_private *bp = NULL;
        bool retried = false;
        __le32 crc = 0;
        int ret;

retry:
@@ -695,7 +677,9 @@ retry:

        /* corrupted writes might be a sign of a stale reference */
        if (!test_bit(BLOCK_BIT_CRC_VALID, &bp->bits)) {
                if (hdr->crc != block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE)) {
                crc = block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE);
                if (hdr->crc != crc) {
                        trace_scoutfs_block_stale(sb, ref, hdr, magic, le32_to_cpu(crc));
                        ret = -ESTALE;
                        goto out;
                }
@@ -705,14 +689,15 @@ retry:

        if (hdr->magic != cpu_to_le32(magic) || hdr->fsid != cpu_to_le64(sbi->fsid) ||
            hdr->seq != ref->seq || hdr->blkno != ref->blkno) {
                trace_scoutfs_block_stale(sb, ref, hdr, magic, 0);
                ret = -ESTALE;
                goto out;
        }

        ret = 0;
out:
        if ((ret == -ESTALE || scoutfs_trigger(sb, BLOCK_REMOVE_STALE)) &&
            !retried && !block_is_dirty(bp)) {
        if (!retried && !IS_ERR_OR_NULL(bp) && !block_is_dirty(bp) &&
            (ret == -ESTALE || scoutfs_trigger(sb, BLOCK_REMOVE_STALE))) {
                retried = true;
                scoutfs_inc_counter(sb, block_cache_remove_stale);
                block_remove(sb, bp);
@@ -1077,100 +1062,106 @@ static unsigned long block_count_objects(struct shrinker *shrink, struct shrink_
        struct super_block *sb = binf->sb;

        scoutfs_inc_counter(sb, block_cache_count_objects);

        return shrinker_min_long(atomic_read(&binf->total_inserted));
        return list_lru_shrink_count(&binf->lru, sc);
}

struct isolate_args {
        struct super_block *sb;
        struct list_head dispose;
};

#define DECLARE_ISOLATE_ARGS(sb_, name_) \
        struct isolate_args name_ = { \
                .sb = sb_, \
                .dispose = LIST_HEAD_INIT(name_.dispose), \
        }

static enum lru_status isolate_lru_block(struct list_head *item, struct list_lru_one *list,
                                         void *cb_arg)
{
        struct block_private *bp = container_of(item, struct block_private, lru_head);
        struct isolate_args *ia = cb_arg;

        TRACE_BLOCK(isolate, bp);

        /* rotate accessed blocks to the tail of the list (lazy promotion) */
        if (test_and_clear_bit(BLOCK_BIT_ACCESSED, &bp->bits)) {
                scoutfs_inc_counter(ia->sb, block_cache_isolate_rotate);
                return LRU_ROTATE;
        }

        /* any refs, including dirty/io, stop us from acquiring lru refcount */
        if (!block_get_remove_live_only(bp)) {
                scoutfs_inc_counter(ia->sb, block_cache_isolate_skip);
                return LRU_SKIP;
        }

        scoutfs_inc_counter(ia->sb, block_cache_isolate_removed);
        list_lru_isolate_move(list, &bp->lru_head, &ia->dispose);
        return LRU_REMOVED;
}

static void shrink_dispose_blocks(struct super_block *sb, struct list_head *dispose)
{
        struct block_private *bp;
        struct block_private *bp__;

        list_for_each_entry_safe(bp, bp__, dispose, lru_head) {
                list_del_init(&bp->lru_head);
                block_remove(sb, bp);
                block_put(sb, bp);
        }
}

/*
 * Remove a number of cached blocks that haven't been used recently.
 *
 * We don't maintain a strictly ordered LRU to avoid the contention of
 * accesses always moving blocks around in some precise global
 * structure.
 *
 * Instead we use counters to divide the blocks into two roughly equal
 * groups by how recently they were accessed.  We randomly walk all
 * inserted blocks looking for any blocks in the older half to remove
 * and free.  The random walk and there being two groups means that we
 * typically only walk a small multiple of the number we're looking for
 * before we find them all.
 *
 * Our rcu walk of blocks can see blocks in all stages of their life
 * cycle, from dirty blocks to those with 0 references that are queued
 * for freeing.  We only want to free idle inserted blocks so we
 * atomically remove blocks when the only references are ours and the
 * hash table.
 */
static unsigned long block_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
{
        struct block_info *binf = KC_SHRINKER_CONTAINER_OF(shrink, struct block_info);
        struct super_block *sb = binf->sb;
        struct rhashtable_iter iter;
        struct block_private *bp;
        bool stop = false;
        unsigned long freed = 0;
        unsigned long nr = sc->nr_to_scan;
        u64 recently;
        DECLARE_ISOLATE_ARGS(sb, ia);
        unsigned long freed;

        scoutfs_inc_counter(sb, block_cache_scan_objects);

        recently = accessed_recently(binf);
        rhashtable_walk_enter(&binf->ht, &iter);
        rhashtable_walk_start(&iter);
        freed = kc_list_lru_shrink_walk(&binf->lru, sc, isolate_lru_block, &ia);
        shrink_dispose_blocks(sb, &ia.dispose);
        return freed;
}

        /*
         * This isn't great but I don't see a better way.  We want to
         * walk the hash from a random point so that we're not
         * constantly walking over the same region that we've already
         * freed old blocks within.  The interface doesn't let us do
         * this explicitly, but this seems to work?  The difference this
         * makes is enormous, around a few orders of magnitude fewer
         * _nexts per shrink.
         */
        if (iter.walker.tbl)
                iter.slot = prandom_u32_max(iter.walker.tbl->size);
static enum lru_status dump_lru_block(struct list_head *item, struct list_lru_one *list,
                                      void *cb_arg)
{
        struct block_private *bp = container_of(item, struct block_private, lru_head);

        while (nr > 0) {
                bp = rhashtable_walk_next(&iter);
                if (bp == NULL)
                        break;
                if (bp == ERR_PTR(-EAGAIN)) {
                        /*
                         * We can be called from reclaim in the allocation
                         * to resize the hash table itself.  We have to
                         * return so that the caller can proceed and
                         * enable hash table iteration again.
                         */
                        scoutfs_inc_counter(sb, block_cache_shrink_stop);
                        stop = true;
                        break;
                }
        printk("blkno %llu refcount 0x%x io_count %d bits 0x%lx\n",
               bp->bl.blkno, atomic_read(&bp->refcount), atomic_read(&bp->io_count),
               bp->bits);
        print_block_stack(bp);

                scoutfs_inc_counter(sb, block_cache_shrink_next);
        return LRU_SKIP;
}

                if (bp->accessed >= recently) {
                        scoutfs_inc_counter(sb, block_cache_shrink_recent);
                        continue;
                }
/*
 * Called during shutdown with no other users.  The isolating walk must
 * find blocks on the lru that only have references for presence on the
 * lru and in the hash table.
 */
static void block_shrink_all(struct super_block *sb)
{
        DECLARE_BLOCK_INFO(sb, binf);
        DECLARE_ISOLATE_ARGS(sb, ia);
        long count;

                if (block_get_if_inserted(bp)) {
                        if (block_remove_solo(sb, bp)) {
                                scoutfs_inc_counter(sb, block_cache_shrink_remove);
                                TRACE_BLOCK(shrink, bp);
                                freed++;
                                nr--;
                        }
                        block_put(sb, bp);
                }
        count = DIV_ROUND_UP(list_lru_count(&binf->lru), 128) * 2;
        do {
                kc_list_lru_walk(&binf->lru, isolate_lru_block, &ia, 128);
                shrink_dispose_blocks(sb, &ia.dispose);
        } while (list_lru_count(&binf->lru) > 0 && --count > 0);

        count = list_lru_count(&binf->lru);
        if (count > 0) {
                scoutfs_err(sb, "failed to isolate/dispose %ld blocks", count);
                kc_list_lru_walk(&binf->lru, dump_lru_block, sb, count);
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);

        if (stop)
                return SHRINK_STOP;
        else
                return freed;
}
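
The drain loop in block_shrink_all() bounds its work instead of spinning forever on a block that can't be isolated: it allows roughly two full passes over the lru in 128-object batches before reporting and dumping whatever is left. A small standalone illustration of that bound (userspace C, not kernel code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        long blocks = 1000;
        /* mirrors block_shrink_all(): about two full passes in batches of 128 */
        long batches = DIV_ROUND_UP(blocks, 128) * 2;

        printf("%ld blocks -> at most %ld walk batches\n", blocks, batches);
        return 0;
}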

struct sm_block_completion {
@@ -1197,7 +1188,7 @@ static void KC_DECLARE_BIO_END_IO(sm_block_bio_end_io, struct bio *bio)
 * only layer that sees the full block buffer so we pass the calculated
 * crc to the caller for them to check in their context.
 */
static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsigned int opf,
static int sm_block_io(struct super_block *sb, struct block_device *bdev, blk_opf_t opf,
                       u64 blkno, struct scoutfs_block_header *hdr, size_t len, __le32 *blk_crc)
{
        struct scoutfs_block_header *pg_hdr;
@@ -1209,7 +1200,7 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
        BUILD_BUG_ON(PAGE_SIZE < SCOUTFS_BLOCK_SM_SIZE);

        if (scoutfs_forcing_unmount(sb))
                return -EIO;
                return -ENOLINK;

        if (WARN_ON_ONCE(len > SCOUTFS_BLOCK_SM_SIZE) ||
            WARN_ON_ONCE(!op_is_write(opf) && !blk_crc))
@@ -1229,15 +1220,13 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
                pg_hdr->crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
        }

        bio = bio_alloc(GFP_NOFS, 1);
        bio = kc_bio_alloc(bdev, 1, opf, GFP_NOFS);
        if (!bio) {
                ret = -ENOMEM;
                goto out;
        }

        kc_bio_set_opf(bio, opf | REQ_SYNC);
        kc_bio_set_sector(bio, blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9));
        bio_set_dev(bio, bdev);
        bio->bi_end_io = sm_block_bio_end_io;
        bio->bi_private = &sbc;
        bio_add_page(bio, page, SCOUTFS_BLOCK_SM_SIZE, 0);
@@ -1277,7 +1266,7 @@ int scoutfs_block_write_sm(struct super_block *sb,
int scoutfs_block_setup(struct super_block *sb)
{
        struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
        struct block_info *binf;
        struct block_info *binf = NULL;
        int ret;

        binf = kzalloc(sizeof(struct block_info), GFP_KERNEL);
@@ -1286,19 +1275,19 @@ int scoutfs_block_setup(struct super_block *sb)
                goto out;
        }

        ret = rhashtable_init(&binf->ht, &block_ht_params);
        if (ret < 0) {
                kfree(binf);
        ret = list_lru_init(&binf->lru);
        if (ret < 0)
                goto out;

        ret = rhashtable_init(&binf->ht, &block_ht_params);
        if (ret < 0)
                goto out;
        }

        binf->sb = sb;
        atomic_set(&binf->total_inserted, 0);
        atomic64_set(&binf->access_counter, 0);
        init_waitqueue_head(&binf->waitq);
        KC_INIT_SHRINKER_FUNCS(&binf->shrinker, block_count_objects,
                               block_scan_objects);
        KC_REGISTER_SHRINKER(&binf->shrinker);
        KC_REGISTER_SHRINKER(&binf->shrinker, "scoutfs-block:" SCSBF, SCSB_ARGS(sb));
        INIT_WORK(&binf->free_work, block_free_work);
        init_llist_head(&binf->free_llist);

@@ -1306,8 +1295,10 @@ int scoutfs_block_setup(struct super_block *sb)

        ret = 0;
out:
        if (ret)
                scoutfs_block_destroy(sb);
        if (ret < 0 && binf) {
                list_lru_destroy(&binf->lru);
                kfree(binf);
        }

        return ret;
}
@@ -1319,9 +1310,10 @@ void scoutfs_block_destroy(struct super_block *sb)

        if (binf) {
                KC_UNREGISTER_SHRINKER(&binf->shrinker);
                block_remove_all(sb);
                block_shrink_all(sb);
                flush_work(&binf->free_work);
                rhashtable_destroy(&binf->ht);
                list_lru_destroy(&binf->lru);

                kfree(binf);
                sbi->block_info = NULL;

@@ -20,6 +20,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <asm/barrier.h>
#include <linux/overflow.h>

#include "format.h"
#include "counters.h"
@@ -68,6 +69,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
        struct client_info *client = SCOUTFS_SB(sb)->client_info;
        struct scoutfs_net_inode_alloc ial;
        __le64 lecount = cpu_to_le64(count);
        u64 tmp;
        int ret;

        ret = scoutfs_net_sync_request(sb, client->conn,
@@ -80,7 +82,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,

        if (*nr == 0)
                ret = -ENOSPC;
        else if (*ino + *nr < *ino)
        else if (check_add_overflow(*ino, *nr - 1, &tmp))
                ret = -EINVAL;
}
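
The new test replaces the hand-rolled wrap check with check_add_overflow() from <linux/overflow.h>, and also tightens it: it checks the last allocated inode, *ino + *nr - 1, rather than one past the range. The same semantics in a standalone userspace snippet built on the GCC builtin that the kernel macro wraps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ino = UINT64_MAX - 1, nr = 4, last;

        /* mirrors check_add_overflow(*ino, *nr - 1, &tmp) above */
        if (__builtin_add_overflow(ino, nr - 1, &last))
                printf("inode range wraps, reject with -EINVAL\n");
        else
                printf("last inode is %llu\n", (unsigned long long)last);
        return 0;
}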

@@ -433,8 +435,8 @@ static int lookup_mounted_client_item(struct super_block *sb, u64 rid)
        if (ret == -ENOENT)
                ret = 0;

        kfree(super);
out:
        kfree(super);
        return ret;
}


@@ -26,17 +26,15 @@
        EXPAND_COUNTER(block_cache_alloc_page_order) \
        EXPAND_COUNTER(block_cache_alloc_virt) \
        EXPAND_COUNTER(block_cache_end_io_error) \
        EXPAND_COUNTER(block_cache_isolate_removed) \
        EXPAND_COUNTER(block_cache_isolate_rotate) \
        EXPAND_COUNTER(block_cache_isolate_skip) \
        EXPAND_COUNTER(block_cache_forget) \
        EXPAND_COUNTER(block_cache_free) \
        EXPAND_COUNTER(block_cache_free_work) \
        EXPAND_COUNTER(block_cache_remove_stale) \
        EXPAND_COUNTER(block_cache_count_objects) \
        EXPAND_COUNTER(block_cache_scan_objects) \
        EXPAND_COUNTER(block_cache_shrink) \
        EXPAND_COUNTER(block_cache_shrink_next) \
        EXPAND_COUNTER(block_cache_shrink_recent) \
        EXPAND_COUNTER(block_cache_shrink_remove) \
        EXPAND_COUNTER(block_cache_shrink_stop) \
        EXPAND_COUNTER(btree_compact_values) \
        EXPAND_COUNTER(btree_compact_values_enomem) \
        EXPAND_COUNTER(btree_delete) \
@@ -90,6 +88,7 @@
        EXPAND_COUNTER(forest_read_items) \
        EXPAND_COUNTER(forest_roots_next_hint) \
        EXPAND_COUNTER(forest_set_bloom_bits) \
        EXPAND_COUNTER(inode_deleted) \
        EXPAND_COUNTER(item_cache_count_objects) \
        EXPAND_COUNTER(item_cache_scan_objects) \
        EXPAND_COUNTER(item_clear_dirty) \
@@ -117,10 +116,11 @@
        EXPAND_COUNTER(item_pcpu_page_hit) \
        EXPAND_COUNTER(item_pcpu_page_miss) \
        EXPAND_COUNTER(item_pcpu_page_miss_keys) \
        EXPAND_COUNTER(item_read_pages_barrier) \
        EXPAND_COUNTER(item_read_pages_retry) \
        EXPAND_COUNTER(item_read_pages_split) \
        EXPAND_COUNTER(item_shrink_page) \
        EXPAND_COUNTER(item_shrink_page_dirty) \
        EXPAND_COUNTER(item_shrink_page_reader) \
        EXPAND_COUNTER(item_shrink_page_trylock) \
        EXPAND_COUNTER(item_update) \
        EXPAND_COUNTER(item_write_dirty) \
@@ -145,6 +145,7 @@
        EXPAND_COUNTER(lock_shrink_work) \
        EXPAND_COUNTER(lock_unlock) \
        EXPAND_COUNTER(lock_wait) \
        EXPAND_COUNTER(log_merge_no_finalized) \
        EXPAND_COUNTER(log_merge_wait_timeout) \
        EXPAND_COUNTER(net_dropped_response) \
        EXPAND_COUNTER(net_send_bytes) \
@@ -162,6 +163,8 @@
        EXPAND_COUNTER(orphan_scan_error) \
        EXPAND_COUNTER(orphan_scan_item) \
        EXPAND_COUNTER(orphan_scan_omap_set) \
        EXPAND_COUNTER(quota_info_count_objects) \
        EXPAND_COUNTER(quota_info_scan_objects) \
        EXPAND_COUNTER(quorum_candidate_server_stopping) \
        EXPAND_COUNTER(quorum_elected) \
        EXPAND_COUNTER(quorum_fence_error) \
@@ -179,6 +182,7 @@
        EXPAND_COUNTER(quorum_send_vote) \
        EXPAND_COUNTER(quorum_server_shutdown) \
        EXPAND_COUNTER(quorum_term_follower) \
        EXPAND_COUNTER(reclaimed_open_logs) \
        EXPAND_COUNTER(server_commit_hold) \
        EXPAND_COUNTER(server_commit_queue) \
        EXPAND_COUNTER(server_commit_worker) \
@@ -199,20 +203,19 @@
        EXPAND_COUNTER(srch_read_stale) \
        EXPAND_COUNTER(statfs) \
        EXPAND_COUNTER(totl_read_copied) \
        EXPAND_COUNTER(totl_read_finalized) \
        EXPAND_COUNTER(totl_read_fs) \
        EXPAND_COUNTER(totl_read_item) \
        EXPAND_COUNTER(totl_read_logged) \
        EXPAND_COUNTER(trans_commit_data_alloc_low) \
        EXPAND_COUNTER(trans_commit_dirty_meta_full) \
        EXPAND_COUNTER(trans_commit_fsync) \
        EXPAND_COUNTER(trans_commit_meta_alloc_low) \
        EXPAND_COUNTER(trans_commit_sync_fs) \
        EXPAND_COUNTER(trans_commit_timer) \
        EXPAND_COUNTER(trans_commit_written)
        EXPAND_COUNTER(trans_commit_written) \
        EXPAND_COUNTER(wkic_count_objects) \
        EXPAND_COUNTER(wkic_scan_objects)

#define FIRST_COUNTER alloc_alloc_data
#define LAST_COUNTER trans_commit_written
#define LAST_COUNTER wkic_scan_objects

#undef EXPAND_COUNTER
#define EXPAND_COUNTER(which) struct percpu_counter which;

kmod/src/data.c (471 lines changed)
@@ -20,7 +20,9 @@
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/writeback.h>
#include <linux/overflow.h>

#include "format.h"
#include "super.h"
@@ -558,7 +560,7 @@ static int scoutfs_get_block(struct inode *inode, sector_t iblock,
        u64 offset;
        int ret;

        WARN_ON_ONCE(create && !inode_is_locked(inode));
        WARN_ON_ONCE(create && !rwsem_is_locked(&si->extent_sem));

        /* make sure caller holds a cluster lock */
        lock = scoutfs_per_task_get(&si->pt_data_lock);
@@ -586,6 +588,12 @@ static int scoutfs_get_block(struct inode *inode, sector_t iblock,
                goto out;
        }

        if (create && !si->staging) {
                ret = scoutfs_inode_check_retention(inode);
                if (ret < 0)
                        goto out;
        }

        /* convert unwritten to written, could be staging */
        if (create && ext.map && (ext.flags & SEF_UNWRITTEN)) {
                un.start = iblock;
@@ -673,8 +681,14 @@ int scoutfs_get_block_write(struct inode *inode, sector_t iblock, struct buffer_
 * We can return errors from locking and checking offline extents.  The
 * page is unlocked if we return an error.
 */
#ifdef KC_MPAGE_READ_FOLIO
static int scoutfs_read_folio(struct file *file, struct folio *folio)
{
        struct page *page = &folio->page;
#else
static int scoutfs_readpage(struct file *file, struct page *page)
{
#endif
        struct inode *inode = file->f_inode;
        struct scoutfs_inode_info *si = SCOUTFS_I(inode);
        struct super_block *sb = inode->i_sb;
@@ -721,7 +735,11 @@ static int scoutfs_readpage(struct file *file, struct page *page)
                return ret;
        }

#ifdef KC_MPAGE_READ_FOLIO
        ret = mpage_read_folio(folio, scoutfs_get_block_read);
#else
        ret = mpage_readpage(page, scoutfs_get_block_read);
#endif

        scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
        scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
@@ -819,7 +837,10 @@ struct write_begin_data {

static int scoutfs_write_begin(struct file *file,
                               struct address_space *mapping, loff_t pos,
                               unsigned len, unsigned flags,
                               unsigned len,
#ifdef KC_BLOCK_WRITE_BEGIN_AOP_FLAGS
                               unsigned flags,
#endif
                               struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
@@ -854,13 +875,18 @@ retry:
        if (ret < 0)
                goto out;

#ifdef KC_BLOCK_WRITE_BEGIN_AOP_FLAGS
        /* can't re-enter fs, have trans */
        flags |= AOP_FLAG_NOFS;
#endif

        /* generic write_end updates i_size and calls dirty_inode */
        ret = scoutfs_dirty_inode_item(inode, wbd->lock) ?:
              block_write_begin(mapping, pos, len, flags, pagep,
                                scoutfs_get_block_write);
              block_write_begin(mapping, pos, len,
#ifdef KC_BLOCK_WRITE_BEGIN_AOP_FLAGS
                                flags,
#endif
                                pagep, scoutfs_get_block_write);
        if (ret < 0) {
                scoutfs_release_trans(sb);
                scoutfs_inode_index_unlock(sb, &wbd->ind_locks);
@@ -1062,6 +1088,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        loff_t end;
        u64 iblock;
        u64 last;
        loff_t tmp;
        s64 ret;

        /* XXX support more flags */
@@ -1070,14 +1097,14 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                goto out;
        }

        /* catch wrapping */
        if (offset + len < offset) {
                ret = -EINVAL;
        if (len == 0) {
                ret = 0;
                goto out;
        }

        if (len == 0) {
                ret = 0;
        /* catch wrapping */
        if (check_add_overflow(offset, len - 1, &tmp)) {
                ret = -EINVAL;
                goto out;
        }

@@ -1104,6 +1131,10 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)

        while(iblock <= last) {

                ret = scoutfs_quota_check_data(sb, inode);
                if (ret)
                        goto out_extent;

                ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
                if (ret)
                        goto out_extent;
@@ -1155,9 +1186,9 @@ out:
 * on regular files with no data extents.  It's used to restore a file
 * with an offline extent which can then trigger staging.
 *
 * The caller has taken care of locking the inode.  We're updating the
 * inode offline count as we create the offline extent so we take care
 * of the index locking, updating, and transaction.
 * The caller must take care of cluster locking, transactions, inode
 * updates, and index updates (so that they can atomically make this
 * change along with other metadata changes).
 */
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
                                     struct scoutfs_lock *lock)
@@ -1171,7 +1202,6 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
                .lock = lock,
        };
        const u64 count = DIV_ROUND_UP(size, SCOUTFS_BLOCK_SM_SIZE);
        LIST_HEAD(ind_locks);
        u64 on;
        u64 off;
        int ret;
@@ -1184,28 +1214,10 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
                goto out;
        }

        /* we're updating meta_seq with offline block count */
        ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
        if (ret < 0)
                goto out;

        ret = scoutfs_dirty_inode_item(inode, lock);
        if (ret < 0)
                goto unlock;

        down_write(&si->extent_sem);
        ret = scoutfs_ext_insert(sb, &data_ext_ops, &args,
                                 0, count, 0, SEF_OFFLINE);
        up_write(&si->extent_sem);
        if (ret < 0)
                goto unlock;

        scoutfs_update_inode_item(inode, lock, &ind_locks);

unlock:
        scoutfs_release_trans(sb);
        scoutfs_inode_index_unlock(sb, &ind_locks);
        ret = 0;
out:
        return ret;
}
@@ -1273,6 +1285,9 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
        if (ret)
                goto out;

        if (!is_stage && (ret = scoutfs_inode_check_retention(to)))
                goto out;

        if ((from_off & SCOUTFS_BLOCK_SM_MASK) ||
            (to_off & SCOUTFS_BLOCK_SM_MASK) ||
            ((byte_len & SCOUTFS_BLOCK_SM_MASK) &&
@@ -1310,8 +1325,8 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
                goto out;
        }

        ret = inode_permission(from, MAY_WRITE) ?:
              inode_permission(to, MAY_WRITE);
        ret = inode_permission(KC_VFS_INIT_NS from, MAY_WRITE) ?:
              inode_permission(KC_VFS_INIT_NS to, MAY_WRITE);
        if (ret < 0)
                goto out;

@@ -1536,33 +1551,32 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        struct super_block *sb = inode->i_sb;
        const u64 ino = scoutfs_ino(inode);
        struct scoutfs_lock *lock = NULL;
        struct scoutfs_extent *info = NULL;
        struct page *page = NULL;
        struct scoutfs_extent ext;
        struct scoutfs_extent cur;
        struct data_ext_args args;
        u32 last_flags;
        u64 iblock;
        u64 last;
        int entries = 0;
        int ret;
        int complete = 0;

        if (len == 0) {
                ret = 0;
                goto out;
        }

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
        if (ret)
                goto out;

        inode_lock(inode);
        down_read(&si->extent_sem);

        ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
        if (ret)
                goto unlock;

        args.ino = ino;
        args.inode = inode;
        args.lock = lock;
        page = alloc_page(GFP_KERNEL);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        /* use a dummy extent to track */
        memset(&cur, 0, sizeof(cur));
@@ -1571,48 +1585,93 @@ int scoutfs_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        iblock = start >> SCOUTFS_BLOCK_SM_SHIFT;
        last = (start + len - 1) >> SCOUTFS_BLOCK_SM_SHIFT;

        args.ino = ino;
        args.inode = inode;

        /* outer loop */
        while (iblock <= last) {
                ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
                                       iblock, 1, &ext);
                if (ret < 0) {
                        if (ret == -ENOENT)
                /* lock */
                inode_lock(inode);
                down_read(&si->extent_sem);

                ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
                if (ret) {
                        up_read(&si->extent_sem);
                        inode_unlock(inode);
                        break;
                }

                args.lock = lock;

                /* collect entries */
                info = page_address(page);
                memset(info, 0, PAGE_SIZE);
                while (entries < (PAGE_SIZE / sizeof(struct fiemap_extent)) - 1) {
                        ret = scoutfs_ext_next(sb, &data_ext_ops, &args,
                                               iblock, 1, &ext);
                        if (ret < 0) {
                                if (ret == -ENOENT)
                                        ret = 0;
                                complete = 1;
                                last_flags = FIEMAP_EXTENT_LAST;
                                break;
                        }

                        trace_scoutfs_data_fiemap_extent(sb, ino, &ext);

                        if (ext.start > last) {
                                /* not setting _LAST, it's for end of file */
                                ret = 0;
                                last_flags = FIEMAP_EXTENT_LAST;
                                break;
                                complete = 1;
                                break;
                        }

                        if (scoutfs_ext_can_merge(&cur, &ext)) {
                                /* merged extents could be greater than input len */
                                cur.len += ext.len;
                        } else {
                                /* fill it */
                                memcpy(info, &cur, sizeof(cur));

                                entries++;
                                info++;

                                cur = ext;
                        }

                        iblock = ext.start + ext.len;
                }

                trace_scoutfs_data_fiemap_extent(sb, ino, &ext);
                /* unlock */
                scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
                up_read(&si->extent_sem);
                inode_unlock(inode);

                if (ext.start > last) {
                        /* not setting _LAST, it's for end of file */
                        ret = 0;
                if (ret)
                        break;
                }

                if (scoutfs_ext_can_merge(&cur, &ext)) {
                        /* merged extents could be greater than input len */
                        cur.len += ext.len;
                } else {
                        ret = fill_extent(fieinfo, &cur, 0);
                /* emit entries */
                info = page_address(page);
                for (; entries > 0; entries--) {
                        ret = fill_extent(fieinfo, info, 0);
                        if (ret != 0)
                                goto unlock;
                        cur = ext;
                                goto out;
                        info++;
                }

                iblock = ext.start + ext.len;
                if (complete)
                        break;
        }

        /* still one left, it's in cur */
        if (cur.len)
                ret = fill_extent(fieinfo, &cur, last_flags);
unlock:
        scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
        up_read(&si->extent_sem);
        inode_unlock(inode);

out:
        if (ret == 1)
                ret = 0;

        if (page)
                __free_page(page);
        trace_scoutfs_data_fiemap(sb, start, len, ret);

        return ret;
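
The restructured fiemap loop batches extents into a private page while the inode, extent, and cluster locks are held, then only calls fill_extent() after dropping them, since copying to the user's buffer can fault. With the final slot reserved, a 4KB page holds on the order of 72 fiemap extents per lock cycle; a quick standalone check of that arithmetic (userspace, using the uapi header):

#include <stdio.h>
#include <linux/fiemap.h>

int main(void)
{
        /* capacity of the per-call page buffer, matching the loop bound */
        printf("%zu extents per page\n",
               4096 / sizeof(struct fiemap_extent) - 1);
        return 0;
}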

@@ -1715,12 +1774,16 @@ int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
        u64 last_block;
        u64 on;
        u64 off;
        loff_t tmp;
        int ret = 0;

        if (len == 0)
                goto out;

        if (WARN_ON_ONCE(sef & SEF_UNKNOWN) ||
            WARN_ON_ONCE(op & SCOUTFS_IOC_DWO_UNKNOWN) ||
            WARN_ON_ONCE(dw && !RB_EMPTY_NODE(&dw->node)) ||
            WARN_ON_ONCE(pos + len < pos)) {
            WARN_ON_ONCE(check_add_overflow(pos, len - 1, &tmp))) {
                ret = -EINVAL;
                goto out;
        }
@@ -1807,37 +1870,6 @@ int scoutfs_data_wait_check_iov(struct inode *inode, const struct iovec *iov,
        return ret;
}

int scoutfs_data_wait_check_iter(struct inode *inode, loff_t pos, struct iov_iter *iter,
                                 u8 sef, u8 op, struct scoutfs_data_wait *dw,
                                 struct scoutfs_lock *lock)
{
        size_t count = iov_iter_count(iter);
        size_t off = iter->iov_offset;
        const struct iovec *iov;
        size_t len;
        int ret = 0;

        for (iov = iter->iov; count > 0; iov++) {
                len = iov->iov_len - off;
                if (len == 0)
                        continue;

                /* aren't we waiting on too much data here ? */
                ret = scoutfs_data_wait_check(inode, pos, len,
                                              sef, op, dw, lock);

                if (ret != 0)
                        break;

                pos += len;
                count -= len;
                off = 0;
        }

        return ret;
}

int scoutfs_data_wait(struct inode *inode, struct scoutfs_data_wait *dw)
{
        DECLARE_DATA_WAIT_ROOT(inode->i_sb, rt);
@@ -1926,8 +1958,244 @@ int scoutfs_data_waiting(struct super_block *sb, u64 ino, u64 iblock,
        return ret;
}

#ifdef KC_MM_VM_FAULT_T
static vm_fault_t scoutfs_data_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static int scoutfs_data_page_mkwrite(struct vm_area_struct *vma,
                                     struct vm_fault *vmf)
{
#endif
        struct page *page = vmf->page;
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct scoutfs_inode_info *si = SCOUTFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct scoutfs_lock *lock = NULL;
        SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
        DECLARE_DATA_WAIT(dw);
        struct write_begin_data wbd;
        u64 ind_seq;
        loff_t pos;
        loff_t size;
        unsigned int len = PAGE_SIZE;
        vm_fault_t ret = VM_FAULT_SIGBUS;
        int err;

        pos = vmf->pgoff << PAGE_SHIFT;

        sb_start_pagefault(sb);

        err = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
                                 SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
        if (err) {
                ret = vmf_error(err);
                goto out;
        }

        size = i_size_read(inode);

        if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, lock)) {
                /* data_version is per inode, whole file must be online */
                err = scoutfs_data_wait_check(inode, 0, size,
                                              SEF_OFFLINE,
                                              SCOUTFS_IOC_DWO_WRITE,
                                              &dw, lock);
                if (err != 0) {
                        if (err < 0)
                                ret = vmf_error(err);
                        goto out_unlock;
                }
        }

        /* scoutfs_write_begin */
        memset(&wbd, 0, sizeof(wbd));
        INIT_LIST_HEAD(&wbd.ind_locks);
        wbd.lock = lock;

        /*
         * Start transaction before taking page locks - we want to make sure we're
         * not locking a page, then waiting for trans, because writeback might race
         * against it and cause a lock inversion hang - as demonstrated by both
         * holetest and fsstress tests in xfstests.
         */
        do {
                err = scoutfs_inode_index_start(sb, &ind_seq) ?:
                      scoutfs_inode_index_prepare(sb, &wbd.ind_locks, inode,
                                                  true) ?:
                      scoutfs_inode_index_try_lock_hold(sb, &wbd.ind_locks,
                                                        ind_seq, false);
        } while (err > 0);
        if (err < 0) {
                ret = vmf_error(err);
                goto out_trans;
        }

        down_write(&si->extent_sem);

        if (!trylock_page(page)) {
                ret = VM_FAULT_NOPAGE;
                goto out_sem;
        }
        ret = VM_FAULT_LOCKED;

        if ((page->mapping != inode->i_mapping) ||
            (!PageUptodate(page)) ||
            (page_offset(page) > size)) {
                unlock_page(page);
                ret = VM_FAULT_NOPAGE;
                goto out_sem;
        }

        if (page->index == (size - 1) >> PAGE_SHIFT)
                len = ((size - 1) & ~PAGE_MASK) + 1;

        err = __block_write_begin(page, pos, PAGE_SIZE, scoutfs_get_block);
        if (err) {
                ret = vmf_error(err);
                unlock_page(page);
                goto out_sem;
        }
        /* end scoutfs_write_begin */

        /*
         * We mark the page dirty already here so that when freeze is in
         * progress, we are guaranteed that writeback during freezing will
         * see the dirty page and writeprotect it again.
         */
        set_page_dirty(page);
        wait_for_stable_page(page);

        /* scoutfs_write_end */
        scoutfs_inode_set_data_seq(inode);
        scoutfs_inode_inc_data_version(inode);

        file_update_time(vma->vm_file);

        scoutfs_update_inode_item(inode, wbd.lock, &wbd.ind_locks);
        scoutfs_inode_queue_writeback(inode);

out_sem:
        up_write(&si->extent_sem);
out_trans:
        scoutfs_release_trans(sb);
        scoutfs_inode_index_unlock(sb, &wbd.ind_locks);
        /* end scoutfs_write_end */

out_unlock:
        scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
        scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);

out:
        sb_end_pagefault(sb);

        if (scoutfs_data_wait_found(&dw)) {
                /*
                 * It'd be really nice to not hold the mmap_sem lock here
                 * before waiting for data, and then return VM_FAULT_RETRY
                 */
                err = scoutfs_data_wait(inode, &dw);
                if (err == 0)
                        ret = VM_FAULT_NOPAGE;
                else
                        ret = vmf_error(err);
        }

        trace_scoutfs_data_page_mkwrite(sb, scoutfs_ino(inode), pos, (__force u32)ret);

        return ret;
}

#ifdef KC_MM_VM_FAULT_T
static vm_fault_t scoutfs_data_filemap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static int scoutfs_data_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct scoutfs_inode_info *si = SCOUTFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct scoutfs_lock *inode_lock = NULL;
        SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
        DECLARE_DATA_WAIT(dw);
        loff_t pos;
        int err;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        pos = vmf->pgoff;
        pos <<= PAGE_SHIFT;

retry:
        err = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
                                 SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
        if (err < 0)
                return vmf_error(err);

        if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
                /* protect checked extents from stage/release */
                atomic_inc(&inode->i_dio_count);

                err = scoutfs_data_wait_check(inode, pos, PAGE_SIZE,
                                              SEF_OFFLINE, SCOUTFS_IOC_DWO_READ,
                                              &dw, inode_lock);
                if (err != 0) {
                        if (err < 0)
                                ret = vmf_error(err);
                        goto out;
                }
        }

#ifdef KC_MM_VM_FAULT_T
        ret = filemap_fault(vmf);
#else
        ret = filemap_fault(vma, vmf);
#endif

out:
        if (scoutfs_per_task_del(&si->pt_data_lock, &pt_ent))
                kc_inode_dio_end(inode);
        scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
        if (scoutfs_data_wait_found(&dw)) {
                err = scoutfs_data_wait(inode, &dw);
                if (err == 0)
                        goto retry;

                ret = VM_FAULT_RETRY;
        }

        trace_scoutfs_data_filemap_fault(sb, scoutfs_ino(inode), pos, (__force u32)ret);

        return ret;
}

static const struct vm_operations_struct scoutfs_data_file_vm_ops = {
        .fault = scoutfs_data_filemap_fault,
        .page_mkwrite = scoutfs_data_page_mkwrite,
#ifdef KC_MM_REMAP_PAGES
        .remap_pages = generic_file_remap_pages,
#endif
};

static int scoutfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &scoutfs_data_file_vm_ops;
        return 0;
}

const struct address_space_operations scoutfs_file_aops = {
#ifdef KC_MPAGE_READ_FOLIO
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio = scoutfs_read_folio,
#else
        .readpage = scoutfs_readpage,
#endif
#ifndef KC_FILE_AOPS_READAHEAD
        .readpages = scoutfs_readpages,
#else
@@ -1948,7 +2216,10 @@ const struct file_operations scoutfs_file_fops = {
#else
        .read_iter = scoutfs_file_read_iter,
        .write_iter = scoutfs_file_write_iter,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
#endif
        .mmap = scoutfs_file_mmap,
        .unlocked_ioctl = scoutfs_ioctl,
        .fsync = scoutfs_file_fsync,
        .llseek = scoutfs_file_llseek,

@@ -65,9 +65,6 @@ int scoutfs_data_wait_check_iov(struct inode *inode, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos, u8 sef,
                                u8 op, struct scoutfs_data_wait *ow,
                                struct scoutfs_lock *lock);
int scoutfs_data_wait_check_iter(struct inode *inode, loff_t pos, struct iov_iter *iter,
                                 u8 sef, u8 op, struct scoutfs_data_wait *ow,
                                 struct scoutfs_lock *lock);
bool scoutfs_data_wait_found(struct scoutfs_data_wait *ow);
int scoutfs_data_wait(struct inode *inode,
                      struct scoutfs_data_wait *ow);

kmod/src/dir.c (216 lines changed)
@@ -11,11 +11,13 @@
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/xattr.h>
#include <linux/namei.h>
#include <linux/mm.h>

#include "format.h"
#include "file.h"
@@ -34,6 +36,7 @@
#include "forest.h"
#include "acl.h"
#include "counters.h"
#include "quota.h"
#include "scoutfs_trace.h"

/*
@@ -433,6 +436,15 @@ out:
        return d_splice_alias(inode, dentry);
}

/*
 * Helper to make iterating through dirent ptrs aligned
 */
static inline struct scoutfs_dirent *next_aligned_dirent(struct scoutfs_dirent *dent, u8 len)
{
        return (void *)dent +
               ALIGN(offsetof(struct scoutfs_dirent, name[len]), __alignof__(struct scoutfs_dirent));
}
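
The ALIGN() above rounds every stored entry up to the struct's natural alignment so the next dirent header always starts properly aligned within the page. A worked example with a stand-in struct whose layout is assumed, not the real scoutfs_dirent:

#include <stdio.h>
#include <stddef.h>

/* hypothetical layout for illustration only */
struct demo_dirent {
        unsigned long long ino;
        unsigned long long pos;
        unsigned char type;
        char name[];
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t len = 5;         /* five byte name */
        size_t end = offsetof(struct demo_dirent, name) + len;

        printf("next dirent starts %zu bytes in\n",
               ALIGN_UP(end, __alignof__(struct demo_dirent)));
        return 0;
}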

/*
 * readdir simply iterates over the dirent items for the dir inode and
 * uses their offset as the readdir position.
@@ -440,76 +452,112 @@ out:
 * It will need to be careful not to read past the region of the dirent
 * hash offset keys that it has access to.
 */
static int KC_DECLARE_READDIR(scoutfs_readdir, struct file *file,
                              void *dirent, kc_readdir_ctx_t ctx)
static int scoutfs_readdir(struct file *file, struct dir_context *ctx)
{
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct scoutfs_lock *dir_lock = NULL;
        struct scoutfs_dirent *dent = NULL;
        /* we'll store name_len in dent->__pad[0] */
#define hacky_name_len __pad[0]
        struct scoutfs_key last_key;
        struct scoutfs_key key;
        struct page *page = NULL;
        int name_len;
        u64 pos;
        int entries = 0;
        int ret;
        int complete = 0;
        struct scoutfs_dirent *end;

        if (!kc_dir_emit_dots(file, dirent, ctx))
        if (!dir_emit_dots(file, ctx))
                return 0;

        dent = alloc_dirent(SCOUTFS_NAME_LEN);
        if (!dent) {
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        }

        end = page_address(page) + PAGE_SIZE;

        init_dirent_key(&last_key, SCOUTFS_READDIR_TYPE, scoutfs_ino(inode),
                        SCOUTFS_DIRENT_LAST_POS, 0);

        ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &dir_lock);
        if (ret)
                goto out;

        /*
         * lock and fetch dirent items, until the page no longer fits
         * a max size dirent (288b).  Then unlock and dir_emit the ones
         * we stored in the page.
         */
        for (;;) {
                init_dirent_key(&key, SCOUTFS_READDIR_TYPE, scoutfs_ino(inode),
                                kc_readdir_pos(file, ctx), 0);
                /* lock */
                ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &dir_lock);
                if (ret)
                        break;

                ret = scoutfs_item_next(sb, &key, &last_key, dent,
                                        dirent_bytes(SCOUTFS_NAME_LEN),
                                        dir_lock);
                if (ret < 0) {
                        if (ret == -ENOENT)
                dent = page_address(page);
                pos = ctx->pos;
                while (next_aligned_dirent(dent, SCOUTFS_NAME_LEN) < end) {
                        init_dirent_key(&key, SCOUTFS_READDIR_TYPE, scoutfs_ino(inode),
                                        pos, 0);

                        ret = scoutfs_item_next(sb, &key, &last_key, dent,
                                                dirent_bytes(SCOUTFS_NAME_LEN),
                                                dir_lock);
                        if (ret < 0) {
                                if (ret == -ENOENT) {
                                        ret = 0;
                                        complete = 1;
                                }
                                break;
                        }

                        name_len = ret - sizeof(struct scoutfs_dirent);
                        dent->hacky_name_len = name_len;
                        if (name_len < 1 || name_len > SCOUTFS_NAME_LEN) {
                                scoutfs_corruption(sb, SC_DIRENT_READDIR_NAME_LEN,
                                                   corrupt_dirent_readdir_name_len,
                                                   "dir_ino %llu pos %llu key "SK_FMT" len %d",
                                                   scoutfs_ino(inode),
                                                   pos,
                                                   SK_ARG(&key), name_len);
                                ret = -EIO;
                                break;
                        }

                        pos = le64_to_cpu(dent->pos) + 1;

                        dent = next_aligned_dirent(dent, name_len);
                        entries++;
                }

                /* unlock */
                scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_READ);

                if (ret < 0)
                        break;

                dent = page_address(page);
                for (; entries > 0; entries--) {
                        ctx->pos = le64_to_cpu(dent->pos);
                        if (!dir_emit(ctx, dent->name, dent->hacky_name_len,
                                      le64_to_cpu(dent->ino),
                                      dentry_type(dent->type))) {
                                ret = 0;
                                goto out;
                        }

                        dent = next_aligned_dirent(dent, dent->hacky_name_len);

                        /* always advance ctx->pos past */
                        ctx->pos++;
                }

                if (complete)
                        break;
        }

                name_len = ret - sizeof(struct scoutfs_dirent);
                if (name_len < 1 || name_len > SCOUTFS_NAME_LEN) {
                        scoutfs_corruption(sb, SC_DIRENT_READDIR_NAME_LEN,
                                           corrupt_dirent_readdir_name_len,
                                           "dir_ino %llu pos %llu key "SK_FMT" len %d",
                                           scoutfs_ino(inode),
                                           kc_readdir_pos(file, ctx),
                                           SK_ARG(&key), name_len);
                        ret = -EIO;
                        goto out;
                }

                pos = le64_to_cpu(key.skd_major);
                kc_readdir_pos(file, ctx) = pos;

                if (!kc_dir_emit(ctx, dirent, dent->name, name_len, pos,
                                 le64_to_cpu(dent->ino),
                                 dentry_type(dent->type))) {
                        ret = 0;
                        break;
                }

                kc_readdir_pos(file, ctx) = pos + 1;
        }

out:
        scoutfs_unlock(sb, dir_lock, SCOUTFS_LOCK_READ);

        kfree(dent);
        if (page)
                __free_page(page);
        return ret;
}
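
The 288 bytes in the comment is the worst case aligned dirent, the struct header plus a SCOUTFS_NAME_LEN name; at that size one 4KB page batches roughly 14 entries per lock/emit cycle. The arithmetic as a standalone check (sizes taken from the comment, not computed from the real struct):

#include <stdio.h>

int main(void)
{
        /* 4096-byte page, 288-byte worst-case aligned dirent */
        printf("~%d dirents per locked batch\n", 4096 / 288);
        return 0;
}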

@@ -651,6 +699,10 @@ static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
        if (ret)
                goto out_unlock;

        ret = scoutfs_quota_check_inode(sb, dir);
        if (ret)
                goto out_unlock;

        if (orph_lock) {
                ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, orph_lock);
                if (ret < 0)
@@ -672,6 +724,8 @@ retry:
        if (ret < 0)
                goto out;

        scoutfs_inode_set_proj(inode, scoutfs_inode_get_proj(dir));

        ret = scoutfs_dirty_inode_item(dir, *dir_lock);
out:
        if (ret)
@@ -696,8 +750,9 @@ out_unlock:
        return inode;
}

static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                         dev_t rdev)
static int scoutfs_mknod(KC_VFS_NS_DEF
                         struct inode *dir,
                         struct dentry *dentry, umode_t mode, dev_t rdev)
{
        struct super_block *sb = dir->i_sb;
        struct inode *inode = NULL;
@@ -766,15 +821,20 @@ out:
}

/* XXX hmm, do something with excl? */
static int scoutfs_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode, bool excl)
static int scoutfs_create(KC_VFS_NS_DEF
                          struct inode *dir,
                          struct dentry *dentry, umode_t mode, bool excl)
{
        return scoutfs_mknod(dir, dentry, mode | S_IFREG, 0);
        return scoutfs_mknod(KC_VFS_NS
                             dir, dentry, mode | S_IFREG, 0);
}

static int scoutfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
static int scoutfs_mkdir(KC_VFS_NS_DEF
                         struct inode *dir,
                         struct dentry *dentry, umode_t mode)
{
        return scoutfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        return scoutfs_mknod(KC_VFS_NS
                             dir, dentry, mode | S_IFDIR, 0);
}

static int scoutfs_link(struct dentry *old_dentry,
@@ -926,12 +986,16 @@ static int scoutfs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }

        ret = scoutfs_inode_check_retention(inode);
        if (ret < 0)
                goto unlock;

        hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);

        ret = lookup_dirent(sb, scoutfs_ino(dir), dentry->d_name.name, dentry->d_name.len, hash,
                            &dent, dir_lock);
        if (ret < 0)
                goto out;
                goto unlock;

        if (should_orphan(inode)) {
                ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(inode),
@@ -1165,7 +1229,8 @@ static const char *scoutfs_get_link(struct dentry *dentry, struct inode *inode,
 * Symlink target paths can be annoyingly large.  We store relatively
 * rare large paths in multiple items.
 */
static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
static int scoutfs_symlink(KC_VFS_NS_DEF
                           struct inode *dir, struct dentry *dentry,
                           const char *symname)
{
        struct super_block *sb = dir->i_sb;
@@ -1552,7 +1617,8 @@ static int verify_ancestors(struct super_block *sb, u64 p1, u64 p2,
 * from using parent/child locking orders as two groups can have both
 * parent and child relationships to each other.
 */
static int scoutfs_rename_common(struct inode *old_dir,
static int scoutfs_rename_common(KC_VFS_NS_DEF
                                 struct inode *old_dir,
                                 struct dentry *old_dentry, struct inode *new_dir,
                                 struct dentry *new_dentry, unsigned int flags)
{
@@ -1632,6 +1698,10 @@ static int scoutfs_rename_common(struct inode *old_dir,
                goto out_unlock;
        }

        if ((old_inode && (ret = scoutfs_inode_check_retention(old_inode))) ||
            (new_inode && (ret = scoutfs_inode_check_retention(new_inode))))
                goto out_unlock;

        if (should_orphan(new_inode)) {
                ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(new_inode),
                                          &orph_lock);
@@ -1742,7 +1812,7 @@ retry:
        }
        old_inode->i_ctime = now;
        if (new_inode)
                old_inode->i_ctime = now;
                new_inode->i_ctime = now;

        inode_inc_iversion(old_dir);
        inode_inc_iversion(old_inode);
@@ -1825,18 +1895,21 @@ static int scoutfs_rename(struct inode *old_dir,
                          struct dentry *old_dentry, struct inode *new_dir,
                          struct dentry *new_dentry)
{
        return scoutfs_rename_common(old_dir, old_dentry, new_dir, new_dentry, 0);
        return scoutfs_rename_common(KC_VFS_INIT_NS
                                     old_dir, old_dentry, new_dir, new_dentry, 0);
}
#endif

static int scoutfs_rename2(struct inode *old_dir,
static int scoutfs_rename2(KC_VFS_NS_DEF
                           struct inode *old_dir,
                           struct dentry *old_dentry, struct inode *new_dir,
                           struct dentry *new_dentry, unsigned int flags)
{
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;

        return scoutfs_rename_common(old_dir, old_dentry, new_dir, new_dentry, flags);
        return scoutfs_rename_common(KC_VFS_NS
                                     old_dir, old_dentry, new_dir, new_dentry, flags);
}

#ifdef KC_FMODE_KABI_ITERATE
@@ -1848,8 +1921,18 @@ static int scoutfs_dir_open(struct inode *inode, struct file *file)
}
#endif

static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
static int scoutfs_tmpfile(KC_VFS_NS_DEF
                           struct inode *dir,
#ifdef KC_D_TMPFILE_DENTRY
                           struct dentry *dentry,
#else
                           struct file *file,
#endif
                           umode_t mode)
{
#ifndef KC_D_TMPFILE_DENTRY
        struct dentry *dentry = file->f_path.dentry;
#endif
        struct super_block *sb = dir->i_sb;
        struct inode *inode = NULL;
        struct scoutfs_lock *dir_lock = NULL;
@@ -1876,7 +1959,11 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
        si->crtime = inode->i_mtime;
        insert_inode_hash(inode);
        ihold(inode); /* need to update inode modifications in d_tmpfile */
#ifdef KC_D_TMPFILE_DENTRY
        d_tmpfile(dentry, inode);
#else
        d_tmpfile(file, inode);
#endif
        inode_inc_iversion(inode);
        scoutfs_forest_inc_inode_count(sb);

@@ -1884,6 +1971,10 @@ static int scoutfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mod
        scoutfs_update_inode_item(dir, dir_lock, &ind_locks);
        scoutfs_inode_index_unlock(sb, &ind_locks);

#ifndef KC_D_TMPFILE_DENTRY
        ret = finish_open_simple(file, 0);
#endif

out:
        scoutfs_release_trans(sb);
        scoutfs_inode_index_unlock(sb, &ind_locks);
@@ -1929,7 +2020,7 @@ const struct inode_operations scoutfs_symlink_iops = {
};

const struct file_operations scoutfs_dir_fops = {
        .KC_FOP_READDIR = scoutfs_readdir,
        .iterate = scoutfs_readdir,
#ifdef KC_FMODE_KABI_ITERATE
        .open = scoutfs_dir_open,
#endif
@@ -1962,6 +2053,9 @@ const struct inode_operations scoutfs_dir_iops = {
#endif
        .listxattr = scoutfs_listxattr,
        .get_acl = scoutfs_get_acl,
#ifdef KC_GET_ACL_DENTRY
        .set_acl = scoutfs_set_acl,
#endif
        .symlink = scoutfs_symlink,
        .permission = scoutfs_permission,
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER

@@ -105,12 +105,12 @@ static ssize_t elapsed_secs_show(struct kobject *kobj,
{
        DECLARE_FENCE_FROM_KOBJ(fence, kobj);
        ktime_t now = ktime_get();
        struct timeval tv = { 0, };
        ktime_t t = ns_to_ktime(0);

        if (ktime_after(now, fence->start_kt))
                tv = ktime_to_timeval(ktime_sub(now, fence->start_kt));
                t = ktime_sub(now, fence->start_kt);

        return snprintf(buf, PAGE_SIZE, "%llu", (long long)tv.tv_sec);
        return snprintf(buf, PAGE_SIZE, "%llu", (long long)ktime_divns(t, NSEC_PER_SEC));
}
SCOUTFS_ATTR_RO(elapsed_secs);
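
struct timeval and ktime_to_timeval() are gone on newer kernels, so the hunk keeps the elapsed time as a ktime_t and divides down to whole seconds with ktime_divns(t, NSEC_PER_SEC). The same arithmetic in plain C:

#include <stdio.h>

int main(void)
{
        /* what ktime_divns(t, NSEC_PER_SEC) yields for 3.12s of elapsed time */
        long long elapsed_ns = 3123456789LL;

        printf("%lld\n", elapsed_ns / 1000000000LL);    /* prints 3 */
        return 0;
}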
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@
#include "inode.h"
#include "per_task.h"
#include "omap.h"
#include "quota.h"

#ifdef KC_LINUX_HAVE_FOP_AIO_READ
/*
@@ -108,6 +109,10 @@ retry:
    if (ret)
        goto out;

    ret = scoutfs_inode_check_retention(inode);
    if (ret < 0)
        goto out;

    ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
    if (ret)
        goto out;
@@ -122,6 +127,10 @@ retry:
        goto out;
    }

    ret = scoutfs_quota_check_data(sb, inode);
    if (ret)
        goto out;

    /* XXX: remove SUID bit */

    ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -171,10 +180,8 @@ retry:
        goto out;

    if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
        ret = scoutfs_data_wait_check_iter(inode, iocb->ki_pos, to,
                                           SEF_OFFLINE,
                                           SCOUTFS_IOC_DWO_READ,
                                           &dw, scoutfs_inode_lock);
        ret = scoutfs_data_wait_check(inode, iocb->ki_pos, iov_iter_count(to), SEF_OFFLINE,
                                      SCOUTFS_IOC_DWO_READ, &dw, scoutfs_inode_lock);
        if (ret != 0)
            goto out;
    } else {
@@ -205,8 +212,7 @@ ssize_t scoutfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
    struct scoutfs_lock *scoutfs_inode_lock = NULL;
    SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
    DECLARE_DATA_WAIT(dw);
    int ret;
    int written;
    ssize_t ret;

retry:
    inode_lock(inode);
@@ -219,23 +225,29 @@ retry:
    if (ret <= 0)
        goto out;

    ret = scoutfs_inode_check_retention(inode);
    if (ret < 0)
        goto out;

    ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
    if (ret)
        goto out;

    ret = scoutfs_quota_check_data(sb, inode);
    if (ret)
        goto out;

    if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
        /* data_version is per inode, whole file must be online */
        ret = scoutfs_data_wait_check_iter(inode, iocb->ki_pos, from,
                                           SEF_OFFLINE,
                                           SCOUTFS_IOC_DWO_WRITE,
                                           &dw, scoutfs_inode_lock);
        ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode), SEF_OFFLINE,
                                      SCOUTFS_IOC_DWO_WRITE, &dw, scoutfs_inode_lock);
        if (ret != 0)
            goto out;
    }

    /* XXX: remove SUID bit */

    written = __generic_file_write_iter(iocb, from);
    ret = __generic_file_write_iter(iocb, from);

out:
    scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
@@ -248,14 +260,15 @@ out:
        goto retry;
    }

    if (ret > 0 || ret == -EIOCBQUEUED)
        ret = generic_write_sync(iocb, written);
    if (ret > 0)
        ret = generic_write_sync(iocb, ret);

    return written ? written : ret;
    return ret;
}
#endif

int scoutfs_permission(struct inode *inode, int mask)
int scoutfs_permission(KC_VFS_NS_DEF
                       struct inode *inode, int mask)
{
    struct super_block *sb = inode->i_sb;
    struct scoutfs_lock *inode_lock = NULL;
@@ -269,7 +282,8 @@ int scoutfs_permission(struct inode *inode, int mask)
    if (ret)
        return ret;

    ret = generic_permission(inode, mask);
    ret = generic_permission(KC_VFS_INIT_NS
                             inode, mask);

    scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);


@@ -10,7 +10,8 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t scoutfs_file_read_iter(struct kiocb *, struct iov_iter *);
ssize_t scoutfs_file_write_iter(struct kiocb *, struct iov_iter *);
#endif
int scoutfs_permission(struct inode *inode, int mask);
int scoutfs_permission(KC_VFS_NS_DEF
                       struct inode *inode, int mask);
loff_t scoutfs_file_llseek(struct file *file, loff_t offset, int whence);

#endif /* _SCOUTFS_FILE_H_ */
@@ -238,19 +238,16 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u6
 * We return -ESTALE if we hit stale blocks to give the caller a chance
 * to reset their state and retry with a newer version of the btrees.
 */
int scoutfs_forest_read_items(struct super_block *sb,
                              struct scoutfs_key *key,
                              struct scoutfs_key *bloom_key,
                              struct scoutfs_key *start,
                              struct scoutfs_key *end,
                              scoutfs_forest_item_cb cb, void *arg)
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
                                    struct scoutfs_key *key, struct scoutfs_key *bloom_key,
                                    struct scoutfs_key *start, struct scoutfs_key *end,
                                    scoutfs_forest_item_cb cb, void *arg)
{
    struct forest_read_items_data rid = {
        .cb = cb,
        .cb_arg = arg,
    };
    struct scoutfs_log_trees lt;
    struct scoutfs_net_roots roots;
    struct scoutfs_bloom_block *bb;
    struct forest_bloom_nrs bloom;
    SCOUTFS_BTREE_ITEM_REF(iref);
@@ -264,18 +261,14 @@ int scoutfs_forest_read_items(struct super_block *sb,
    scoutfs_inc_counter(sb, forest_read_items);
    calc_bloom_nrs(&bloom, bloom_key);

    ret = scoutfs_client_get_roots(sb, &roots);
    if (ret)
        goto out;

    trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
    trace_scoutfs_forest_using_roots(sb, &roots->fs_root, &roots->logs_root);

    *start = orig_start;
    *end = orig_end;

    /* start with fs root items */
    rid.fic |= FIC_FS_ROOT;
    ret = scoutfs_btree_read_items(sb, &roots.fs_root, key, start, end,
    ret = scoutfs_btree_read_items(sb, &roots->fs_root, key, start, end,
                                   forest_read_items, &rid);
    if (ret < 0)
        goto out;
@@ -283,7 +276,7 @@ int scoutfs_forest_read_items(struct super_block *sb,

    scoutfs_key_init_log_trees(&ltk, 0, 0);
    for (;; scoutfs_key_inc(&ltk)) {
        ret = scoutfs_btree_next(sb, &roots.logs_root, &ltk, &iref);
        ret = scoutfs_btree_next(sb, &roots->logs_root, &ltk, &iref);
        if (ret == 0) {
            if (iref.val_len == sizeof(lt)) {
                ltk = *iref.key;
@@ -340,6 +333,23 @@ out:
    return ret;
}

int scoutfs_forest_read_items(struct super_block *sb,
                              struct scoutfs_key *key,
                              struct scoutfs_key *bloom_key,
                              struct scoutfs_key *start,
                              struct scoutfs_key *end,
                              scoutfs_forest_item_cb cb, void *arg)
{
    struct scoutfs_net_roots roots;
    int ret;

    ret = scoutfs_client_get_roots(sb, &roots);
    if (ret == 0)
        ret = scoutfs_forest_read_items_roots(sb, &roots, key, bloom_key, start, end,
                                              cb, arg);
    return ret;
}

/*
 * If the items are deltas then combine the src with the destination
 * value and store the result in the destination.
@@ -4,6 +4,7 @@
struct scoutfs_alloc;
struct scoutfs_block_writer;
struct scoutfs_block;
struct scoutfs_lock;

#include "btree.h"

@@ -23,6 +24,10 @@ int scoutfs_forest_read_items(struct super_block *sb,
                              struct scoutfs_key *start,
                              struct scoutfs_key *end,
                              scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
                                    struct scoutfs_key *key, struct scoutfs_key *bloom_key,
                                    struct scoutfs_key *start, struct scoutfs_key *end,
                                    scoutfs_forest_item_cb cb, void *arg);
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
                                  struct scoutfs_lock *lock);
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);
@@ -8,9 +8,14 @@
 */
#define SCOUTFS_FORMAT_VERSION_MIN 1
#define SCOUTFS_FORMAT_VERSION_MIN_STR __stringify(SCOUTFS_FORMAT_VERSION_MIN)
#define SCOUTFS_FORMAT_VERSION_MAX 1
#define SCOUTFS_FORMAT_VERSION_MAX 2
#define SCOUTFS_FORMAT_VERSION_MAX_STR __stringify(SCOUTFS_FORMAT_VERSION_MAX)

#define SCOUTFS_FORMAT_VERSION_FEAT_RETENTION 2
#define SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID 2
#define SCOUTFS_FORMAT_VERSION_FEAT_QUOTA 2
#define SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG 2

/* statfs(2) f_type */
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
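The _FEAT_ defines above record the format version that introduced each version-2 feature. A minimal sketch of how code might gate behavior on them, assuming the fmt_vers field that the inode.c hunks below read via SCOUTFS_SB(sb); the helper name here is hypothetical, not from the change:

/* hypothetical helper: true if the mounted format supports quota items */
static inline bool scoutfs_fmt_has_quota(struct scoutfs_sb_info *sbi)
{
    return sbi->fmt_vers >= SCOUTFS_FORMAT_VERSION_FEAT_QUOTA;
}
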
@@ -175,6 +180,10 @@ struct scoutfs_key {
#define sko_rid _sk_first
#define sko_ino _sk_second

/* quota rules */
#define skqr_hash _sk_second
#define skqr_coll_nr _sk_third

/* xattr totl */
#define skxt_a _sk_first
#define skxt_b _sk_second
@@ -461,7 +470,7 @@ struct scoutfs_srch_compact {
 * @get_trans_seq, @commit_trans_seq: This pair of sequence numbers
 * determines if a transaction is currently open for the mount that owns
 * the log_trees struct.  get_trans_seq is advanced by the server as the
 * transaction is opened.  The server sets comimt_trans_seq equal to
 * transaction is opened.  The server sets commit_trans_seq equal to
 * get_ as the transaction is committed.
 */
struct scoutfs_log_trees {
@@ -585,7 +594,9 @@ struct scoutfs_log_merge_freeing {
 */
#define SCOUTFS_INODE_INDEX_ZONE 4
#define SCOUTFS_ORPHAN_ZONE 8
#define SCOUTFS_QUOTA_ZONE 10
#define SCOUTFS_XATTR_TOTL_ZONE 12
#define SCOUTFS_XATTR_INDX_ZONE 14
#define SCOUTFS_FS_ZONE 16
#define SCOUTFS_LOCK_ZONE 20
/* Items only stored in server btrees */
@@ -608,6 +619,9 @@ struct scoutfs_log_merge_freeing {
/* orphan zone, redundant type used for clarity */
#define SCOUTFS_ORPHAN_TYPE 4

/* quota zone */
#define SCOUTFS_QUOTA_RULE_TYPE 4

/* fs zone */
#define SCOUTFS_INODE_TYPE 4
#define SCOUTFS_XATTR_TYPE 8
@@ -661,6 +675,34 @@ struct scoutfs_xattr_totl_val {
    __le64 count;
};

#define SQ_RF_TOTL_COUNT (1 << 0)
#define SQ_RF__UNKNOWN (~((1 << 1) - 1))

#define SQ_NS_LITERAL 0
#define SQ_NS_PROJ 1
#define SQ_NS_UID 2
#define SQ_NS_GID 3
#define SQ_NS__NR 4
#define SQ_NS__NR_SELECT (SQ_NS__NR - 1) /* !literal */

#define SQ_NF_SELECT (1 << 0)
#define SQ_NF__UNKNOWN (~((1 << 1) - 1))

#define SQ_OP_INODE 0
#define SQ_OP_DATA 1
#define SQ_OP__NR 2

struct scoutfs_quota_rule_val {
    __le64 name_val[3];
    __le64 limit;
    __u8 prio;
    __u8 op;
    __u8 rule_flags;
    __u8 name_source[3];
    __u8 name_flags[3];
    __u8 _pad[7];
};

/* XXX does this exist upstream somewhere? */
#define member_sizeof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))

@@ -859,9 +901,38 @@ struct scoutfs_inode {
    struct scoutfs_timespec ctime;
    struct scoutfs_timespec mtime;
    struct scoutfs_timespec crtime;
    __le64 proj;
};

#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
#define SCOUTFS_INODE_FMT_V1_BYTES offsetof(struct scoutfs_inode, proj)

/*
 * There are so few versions that we don't mind doing this work inline
 * so that both utils and kernel can share these.  Mounting has already
 * checked that the format version is within the supported min and max,
 * so these functions only deal with size variance within that band.
 */
/* Returns the native written inode size for the given format version, 0 for bad version */
static inline int scoutfs_inode_vers_bytes(__u64 fmt_vers)
{
    if (fmt_vers == 1)
        return SCOUTFS_INODE_FMT_V1_BYTES;
    else
        return sizeof(struct scoutfs_inode);
}
/*
 * Returns true if bytes is a valid inode size to read from the given
 * version.  The given version must be greater than the version that
 * introduced the size.
 */
static inline int scoutfs_inode_valid_vers_bytes(__u64 fmt_vers, int bytes)
{
    return (bytes == sizeof(struct scoutfs_inode) && fmt_vers == SCOUTFS_FORMAT_VERSION_MAX) ||
           (bytes == SCOUTFS_INODE_FMT_V1_BYTES);
}
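Version-1 inodes were written without the trailing proj field, so a reader that pulls a v1 item into the current struct has to zero-extend it; scoutfs_item_lookup_smaller_zero() in the item.c hunks below does that zeroing. A hedged sketch of the validate-then-extend pattern, where bytes stands for the size the item lookup returned:

/* sketch: validate a read inode size and zero-extend a short v1 item */
static int extend_read_inode(struct scoutfs_inode *sinode, __u64 fmt_vers, int bytes)
{
    if (!scoutfs_inode_valid_vers_bytes(fmt_vers, bytes))
        return -EIO;
    if (bytes < (int)sizeof(struct scoutfs_inode))
        memset((char *)sinode + bytes, 0,
               sizeof(struct scoutfs_inode) - bytes);
    /* fields added after v1, like proj, now read as zero */
    return 0;
}
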
#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
#define SCOUTFS_INO_FLAG_RETENTION 0x2

#define SCOUTFS_ROOT_INO 1

@@ -1020,7 +1091,8 @@ enum scoutfs_net_cmd {
    EXPAND_NET_ERRNO(ENOMEM) \
    EXPAND_NET_ERRNO(EIO) \
    EXPAND_NET_ERRNO(ENOSPC) \
    EXPAND_NET_ERRNO(EINVAL)
    EXPAND_NET_ERRNO(EINVAL) \
    EXPAND_NET_ERRNO(ENOLINK)

#undef EXPAND_NET_ERRNO
#define EXPAND_NET_ERRNO(which) SCOUTFS_NET_ERR_##which,
209 kmod/src/inode.c
@@ -91,7 +91,7 @@ static void scoutfs_inode_ctor(void *obj)

    init_rwsem(&si->extent_sem);
    mutex_init(&si->item_mutex);
    seqcount_init(&si->seqcount);
    seqlock_init(&si->seqlock);
    si->staging = false;
    scoutfs_per_task_init(&si->pt_data_lock);
    atomic64_set(&si->data_waitq.changed, 0);
@@ -150,6 +150,9 @@ static const struct inode_operations scoutfs_file_iops = {
#endif
    .listxattr = scoutfs_listxattr,
    .get_acl = scoutfs_get_acl,
#ifdef KC_GET_ACL_DENTRY
    .set_acl = scoutfs_set_acl,
#endif
    .fiemap = scoutfs_data_fiemap,
};

@@ -163,6 +166,9 @@ static const struct inode_operations scoutfs_special_iops = {
#endif
    .listxattr = scoutfs_listxattr,
    .get_acl = scoutfs_get_acl,
#ifdef KC_GET_ACL_DENTRY
    .set_acl = scoutfs_set_acl,
#endif
};

/*
@@ -250,7 +256,7 @@ static void set_item_info(struct scoutfs_inode_info *si,
    set_item_major(si, SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE, sinode->data_seq);
}

static void load_inode(struct inode *inode, struct scoutfs_inode *cinode)
static void load_inode(struct inode *inode, struct scoutfs_inode *cinode, int inode_bytes)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

@@ -278,6 +284,7 @@ static void load_inode(struct inode *inode, struct scoutfs_inode *cinode)
    si->flags = le32_to_cpu(cinode->flags);
    si->crtime.tv_sec = le64_to_cpu(cinode->crtime.sec);
    si->crtime.tv_nsec = le32_to_cpu(cinode->crtime.nsec);
    si->proj = le64_to_cpu(cinode->proj);

    /*
     * i_blocks is initialized from online and offline and is then
@@ -298,6 +305,24 @@ void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino)
    };
}

/*
 * Read an inode item into the caller's buffer and return the size that
 * we read.  Returns errors if the inode size is unsupported or doesn't
 * make sense for the format version.
 */
static int lookup_inode_item(struct super_block *sb, struct scoutfs_key *key,
                             struct scoutfs_inode *sinode, struct scoutfs_lock *lock)
{
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
    int ret;

    ret = scoutfs_item_lookup_smaller_zero(sb, key, sinode, sizeof(struct scoutfs_inode), lock);
    if (ret >= 0 && !scoutfs_inode_valid_vers_bytes(sbi->fmt_vers, ret))
        return -EIO;

    return ret;
}

/*
 * Refresh the vfs inode fields if the lock indicates that the current
 * contents could be stale.
@@ -333,12 +358,12 @@ int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock)

    mutex_lock(&si->item_mutex);
    if (atomic64_read(&si->last_refreshed) < refresh_gen) {
        ret = scoutfs_item_lookup_exact(sb, &key, &sinode,
                                        sizeof(sinode), lock);
        if (ret == 0) {
            load_inode(inode, &sinode);
        ret = lookup_inode_item(sb, &key, &sinode, lock);
        if (ret > 0) {
            load_inode(inode, &sinode, ret);
            atomic64_set(&si->last_refreshed, refresh_gen);
            scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
            ret = 0;
        }
    } else {
        ret = 0;
@@ -354,7 +379,8 @@ int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
{
    struct inode *inode = dentry->d_inode;
#else
int scoutfs_getattr(const struct path *path, struct kstat *stat,
int scoutfs_getattr(KC_VFS_NS_DEF
                    const struct path *path, struct kstat *stat,
                    u32 request_mask, unsigned int query_flags)
{
    struct inode *inode = d_inode(path->dentry);
@@ -366,7 +392,8 @@ int scoutfs_getattr(const struct path *path, struct kstat *stat,
    ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
                             SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
    if (ret == 0) {
        generic_fillattr(inode, stat);
        generic_fillattr(KC_VFS_INIT_NS
                         inode, stat);
        scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
    }
    return ret;
@@ -464,7 +491,8 @@ int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock)
 * re-acquire it.  Ideally we'd fix this so that we can acquire the lock
 * instead of the caller.
 */
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr)
int scoutfs_setattr(KC_VFS_NS_DEF
                    struct dentry *dentry, struct iattr *attr)
{
    struct inode *inode = dentry->d_inode;
    struct super_block *sb = inode->i_sb;
@@ -482,10 +510,15 @@ retry:
                             SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
    if (ret)
        return ret;
    ret = setattr_prepare(dentry, attr);
    ret = setattr_prepare(KC_VFS_INIT_NS
                          dentry, attr);
    if (ret)
        goto out;

    ret = scoutfs_inode_check_retention(inode);
    if (ret < 0)
        goto out;

    attr_size = (attr->ia_valid & ATTR_SIZE) ? attr->ia_size :
                i_size_read(inode);

@@ -542,7 +575,8 @@ retry:
    if (ret < 0)
        goto release;

    setattr_copy(inode, attr);
    setattr_copy(KC_VFS_INIT_NS
                 inode, attr);
    inode_inc_iversion(inode);
    scoutfs_update_inode_item(inode, lock, &ind_locks);

@@ -566,11 +600,9 @@ static void set_trans_seq(struct inode *inode, u64 *seq)
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

    if (*seq != sbi->trans_seq) {
        preempt_disable();
        write_seqcount_begin(&si->seqcount);
        write_seqlock(&si->seqlock);
        *seq = sbi->trans_seq;
        write_seqcount_end(&si->seqcount);
        preempt_enable();
        write_sequnlock(&si->seqlock);
    }
}

@@ -592,22 +624,18 @@ void scoutfs_inode_inc_data_version(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    preempt_disable();
    write_seqcount_begin(&si->seqcount);
    write_seqlock(&si->seqlock);
    si->data_version++;
    write_seqcount_end(&si->seqcount);
    preempt_enable();
    write_sequnlock(&si->seqlock);
}

void scoutfs_inode_set_data_version(struct inode *inode, u64 data_version)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    preempt_disable();
    write_seqcount_begin(&si->seqcount);
    write_seqlock(&si->seqlock);
    si->data_version = data_version;
    write_seqcount_end(&si->seqcount);
    preempt_enable();
    write_sequnlock(&si->seqlock);
}

void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
@@ -616,8 +644,7 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)

    if (inode && (on || off)) {
        si = SCOUTFS_I(inode);
        preempt_disable();
        write_seqcount_begin(&si->seqcount);
        write_seqlock(&si->seqlock);

        /* inode and extents out of sync, bad callers */
        if (((s64)si->online_blocks + on < 0) ||
@@ -638,8 +665,7 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
                    si->online_blocks,
                    si->offline_blocks);

        write_seqcount_end(&si->seqcount);
        preempt_enable();
        write_sequnlock(&si->seqlock);
    }

    /* any time offline extents decreased we try and wake waiters */
@@ -647,16 +673,16 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
    scoutfs_data_wait_changed(inode);
}

static u64 read_seqcount_u64(struct inode *inode, u64 *val)
static u64 read_seqlock_u64(struct inode *inode, u64 *val)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    unsigned int seq;
    unsigned seq;
    u64 v;

    do {
        seq = read_seqcount_begin(&si->seqcount);
        seq = read_seqbegin(&si->seqlock);
        v = *val;
    } while (read_seqcount_retry(&si->seqcount, seq));
    } while (read_seqretry(&si->seqlock, seq));

    return v;
}
@@ -665,33 +691,82 @@ u64 scoutfs_inode_meta_seq(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    return read_seqcount_u64(inode, &si->meta_seq);
    return read_seqlock_u64(inode, &si->meta_seq);
}

u64 scoutfs_inode_data_seq(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    return read_seqcount_u64(inode, &si->data_seq);
    return read_seqlock_u64(inode, &si->data_seq);
}

u64 scoutfs_inode_data_version(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    return read_seqcount_u64(inode, &si->data_version);
    return read_seqlock_u64(inode, &si->data_version);
}

void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    unsigned int seq;
    unsigned seq;

    do {
        seq = read_seqcount_begin(&si->seqcount);
        seq = read_seqbegin(&si->seqlock);
        *on = SCOUTFS_I(inode)->online_blocks;
        *off = SCOUTFS_I(inode)->offline_blocks;
    } while (read_seqcount_retry(&si->seqcount, seq));
    } while (read_seqretry(&si->seqlock, seq));
}

/*
 * Get our private scoutfs inode flags, not the vfs i_flags.
 */
u32 scoutfs_inode_get_flags(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    unsigned seq;
    u32 flags;

    do {
        seq = read_seqbegin(&si->seqlock);
        flags = si->flags;
    } while (read_seqretry(&si->seqlock, seq));

    return flags;
}

void scoutfs_inode_set_flags(struct inode *inode, u32 and, u32 or)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    write_seqlock(&si->seqlock);
    si->flags = (si->flags & and) | or;
    write_sequnlock(&si->seqlock);
}

u64 scoutfs_inode_get_proj(struct inode *inode)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    unsigned seq;
    u64 proj;

    do {
        seq = read_seqbegin(&si->seqlock);
        proj = si->proj;
    } while (read_seqretry(&si->seqlock, seq));

    return proj;
}

void scoutfs_inode_set_proj(struct inode *inode, u64 proj)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);

    write_seqlock(&si->seqlock);
    si->proj = proj;
    write_sequnlock(&si->seqlock);
}

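The new accessors keep flags and proj under the same seqlock as the other sampled fields. Given the (flags & and) | or update above, callers would set and clear individual bits like this; a hypothetical usage sketch, not code from the change:

/* set the retention bit while preserving the rest of the flags */
scoutfs_inode_set_flags(inode, U32_MAX, SCOUTFS_INO_FLAG_RETENTION);

/* and clear it again */
scoutfs_inode_set_flags(inode, ~SCOUTFS_INO_FLAG_RETENTION, 0);
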
static int scoutfs_iget_test(struct inode *inode, void *arg)
@@ -803,7 +878,7 @@ out:
    return inode;
}

static void store_inode(struct scoutfs_inode *cinode, struct inode *inode)
static void store_inode(struct scoutfs_inode *cinode, struct inode *inode, int inode_bytes)
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    u64 online_blocks;
@@ -839,6 +914,7 @@ static void store_inode(struct scoutfs_inode *cinode, struct inode *inode)
    cinode->crtime.sec = cpu_to_le64(si->crtime.tv_sec);
    cinode->crtime.nsec = cpu_to_le32(si->crtime.tv_nsec);
    memset(cinode->crtime.__pad, 0, sizeof(cinode->crtime.__pad));
    cinode->proj = cpu_to_le64(si->proj);
}

/*
@@ -862,15 +938,18 @@ static void store_inode(struct scoutfs_inode *cinode, struct inode *inode)
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock)
{
    struct super_block *sb = inode->i_sb;
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
    struct scoutfs_inode sinode;
    struct scoutfs_key key;
    int inode_bytes;
    int ret;

    store_inode(&sinode, inode);
    inode_bytes = scoutfs_inode_vers_bytes(sbi->fmt_vers);
    store_inode(&sinode, inode, inode_bytes);

    scoutfs_inode_init_key(&key, scoutfs_ino(inode));

    ret = scoutfs_item_update(sb, &key, &sinode, sizeof(sinode), lock);
    ret = scoutfs_item_update(sb, &key, &sinode, inode_bytes, lock);
    if (!ret)
        trace_scoutfs_dirty_inode(inode);
    return ret;
@@ -911,10 +990,10 @@ static bool inode_has_index(umode_t mode, u8 type)
    }
}

static int cmp_index_lock(void *priv, struct list_head *A, struct list_head *B)
static int cmp_index_lock(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
{
    struct index_lock *a = list_entry(A, struct index_lock, head);
    struct index_lock *b = list_entry(B, struct index_lock, head);
    KC_LIST_CMP_CONST struct index_lock *a = list_entry(A, KC_LIST_CMP_CONST struct index_lock, head);
    KC_LIST_CMP_CONST struct index_lock *b = list_entry(B, KC_LIST_CMP_CONST struct index_lock, head);

    return ((int)a->type - (int)b->type) ?:
        scoutfs_cmp_u64s(a->major, b->major) ?:
@@ -1072,9 +1151,11 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
{
    struct scoutfs_inode_info *si = SCOUTFS_I(inode);
    struct super_block *sb = inode->i_sb;
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
    const u64 ino = scoutfs_ino(inode);
    struct scoutfs_key key;
    struct scoutfs_inode sinode;
    struct scoutfs_key key;
    int inode_bytes;
    int ret;
    int err;

@@ -1083,15 +1164,17 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
    /* set the meta version once per trans for any inode updates */
    scoutfs_inode_set_meta_seq(inode);

    inode_bytes = scoutfs_inode_vers_bytes(sbi->fmt_vers);

    /* only race with other inode field stores once */
    store_inode(&sinode, inode);
    store_inode(&sinode, inode, inode_bytes);

    ret = update_indices(sb, si, ino, inode->i_mode, &sinode, lock_list, lock);
    BUG_ON(ret);

    scoutfs_inode_init_key(&key, ino);

    err = scoutfs_item_update(sb, &key, &sinode, sizeof(sinode), lock);
    err = scoutfs_item_update(sb, &key, &sinode, inode_bytes, lock);
    if (err) {
        scoutfs_err(sb, "inode %llu update err %d", ino, err);
        BUG_ON(err);
@@ -1459,10 +1542,12 @@ out:
int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
                      u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret)
{
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
    struct scoutfs_inode_info *si;
    struct scoutfs_key key;
    struct scoutfs_inode sinode;
    struct scoutfs_key key;
    struct inode *inode;
    int inode_bytes;
    int ret;

    inode = new_inode(sb);
@@ -1478,6 +1563,7 @@ int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, d
    si->offline_blocks = 0;
    si->next_readdir_pos = SCOUTFS_DIRENT_FIRST_POS;
    si->next_xattr_id = 0;
    si->proj = 0;
    si->have_item = false;
    atomic64_set(&si->last_refreshed, lock->refresh_gen);
    scoutfs_lock_add_coverage(sb, lock, &si->ino_lock_cov);
@@ -1487,20 +1573,23 @@ int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, d
    scoutfs_inode_set_data_seq(inode);

    inode->i_ino = ino; /* XXX overflow */
    inode_init_owner(inode, dir, mode);
    inode_init_owner(KC_VFS_INIT_NS
                     inode, dir, mode);
    inode_set_bytes(inode, 0);
    inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
    inode->i_rdev = rdev;
    set_inode_ops(inode);

    store_inode(&sinode, inode);
    inode_bytes = scoutfs_inode_vers_bytes(sbi->fmt_vers);

    store_inode(&sinode, inode, inode_bytes);
    scoutfs_inode_init_key(&key, scoutfs_ino(inode));

    ret = scoutfs_omap_set(sb, ino);
    if (ret < 0)
        goto out;

    ret = scoutfs_item_create(sb, &key, &sinode, sizeof(sinode), lock);
    ret = scoutfs_item_create(sb, &key, &sinode, inode_bytes, lock);
    if (ret < 0)
        scoutfs_omap_clear(sb, ino);
out:
@@ -1754,7 +1843,7 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
    }

    scoutfs_inode_init_key(&key, ino);
    ret = scoutfs_item_lookup_exact(sb, &key, &sinode, sizeof(sinode), lock);
    ret = lookup_inode_item(sb, &key, &sinode, lock);
    if (ret < 0) {
        if (ret == -ENOENT)
            ret = 0;
@@ -1771,6 +1860,9 @@ static int try_delete_inode_items(struct super_block *sb, u64 ino)
        goto out;

    ret = delete_inode_items(sb, ino, &sinode, lock, orph_lock);
    if (ret == 0)
        scoutfs_inc_counter(sb, inode_deleted);

out:
    if (clear_trying)
        clear_bit(bit_nr, ldata->trying);
@@ -1879,6 +1971,8 @@ static void iput_worker(struct work_struct *work)
        while (count-- > 0)
            iput(inode);

        cond_resched();

        /* can't touch inode after final iput */

        spin_lock(&inf->iput_lock);
@@ -2100,7 +2194,7 @@ int scoutfs_inode_walk_writeback(struct super_block *sb, bool write)
    struct scoutfs_inode_info *si;
    struct scoutfs_inode_info *tmp;
    struct inode *inode;
    int ret;
    int ret = 0;

    spin_lock(&inf->writeback_lock);

@@ -2143,6 +2237,17 @@ out:
    return ret;
}

/*
 * Return an error if the inode has the retention flag set and can not
 * be modified.  This mimics the errno returned by the vfs when an
 * inode's immutable flag is set.  The flag won't be set on older format
 * versions so we don't check the mounted format version here.
 */
int scoutfs_inode_check_retention(struct inode *inode)
{
    return (scoutfs_inode_get_flags(inode) & SCOUTFS_INO_FLAG_RETENTION) ? -EPERM : 0;
}

int scoutfs_inode_setup(struct super_block *sb)
{
    struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

@@ -21,6 +21,7 @@ struct scoutfs_inode_info {
    u64 data_version;
    u64 online_blocks;
    u64 offline_blocks;
    u64 proj;
    u32 flags;
    struct kc_timespec crtime;

@@ -47,7 +48,7 @@ struct scoutfs_inode_info {
    atomic64_t last_refreshed;

    /* initialized once for slab object */
    seqcount_t seqcount;
    seqlock_t seqlock;
    bool staging; /* holder of i_mutex is staging */
    struct scoutfs_per_task pt_data_lock;
    struct scoutfs_data_waitq data_waitq;
@@ -120,17 +121,26 @@ u64 scoutfs_inode_meta_seq(struct inode *inode);
u64 scoutfs_inode_data_seq(struct inode *inode);
u64 scoutfs_inode_data_version(struct inode *inode);
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off);
u32 scoutfs_inode_get_flags(struct inode *inode);
void scoutfs_inode_set_flags(struct inode *inode, u32 and, u32 or);
u64 scoutfs_inode_get_proj(struct inode *inode);
void scoutfs_inode_set_proj(struct inode *inode, u64 proj);

int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock);

int scoutfs_inode_check_retention(struct inode *inode);

int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock);
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                    struct kstat *stat);
#else
int scoutfs_getattr(const struct path *path, struct kstat *stat,
int scoutfs_getattr(KC_VFS_NS_DEF
                    const struct path *path, struct kstat *stat,
                    u32 request_mask, unsigned int query_flags);
#endif
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
int scoutfs_setattr(KC_VFS_NS_DEF
                    struct dentry *dentry, struct iattr *attr);

int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
                                struct scoutfs_lock *primary);
943 kmod/src/ioctl.c
(file diff suppressed because it is too large)

177 kmod/src/ioctl.h
@@ -366,10 +366,15 @@ struct scoutfs_ioctl_statfs_more {
 *
 * Find current waiters that match the inode, op, and block range to wake
 * up and return an error.
 *
 * (*) ca. v1.25 and earlier required that the data_version passed match
 * that of the waiter, but this check is removed.  It was never needed
 * because no data is modified during this ioctl, so any data_version
 * value passed here has since been ignored.
 */
struct scoutfs_ioctl_data_wait_err {
    __u64 ino;
    __u64 data_version;
    __u64 data_version; /* Ignored, see above (*) */
    __u64 offset;
    __u64 count;
    __u64 op;
@@ -673,4 +678,174 @@ struct scoutfs_ioctl_dirent {
#define SCOUTFS_IOC_GET_REFERRING_ENTRIES \
    _IOW(SCOUTFS_IOCTL_MAGIC, 17, struct scoutfs_ioctl_get_referring_entries)

struct scoutfs_ioctl_inode_attr_x {
    __u64 x_mask;
    __u64 x_flags;
    __u64 meta_seq;
    __u64 data_seq;
    __u64 data_version;
    __u64 online_blocks;
    __u64 offline_blocks;
    __u64 ctime_sec;
    __u32 ctime_nsec;
    __u32 crtime_nsec;
    __u64 crtime_sec;
    __u64 size;
    __u64 bits;
    __u64 project_id;
};

/*
 * Behavioral flags set in the x_flags field.  These flags don't
 * necessarily correspond to specific attributes, but instead change the
 * behaviour of a _get_ or _set_ operation.
 *
 * @SCOUTFS_IOC_IAX_F_SIZE_OFFLINE: When setting i_size, also create
 * extents which are marked offline for the region of the file from
 * offset 0 to the new set size.  This can only be set when setting the
 * size and has no effect if setting the size fails.
 */
#define SCOUTFS_IOC_IAX_F_SIZE_OFFLINE (1ULL << 0)
#define SCOUTFS_IOC_IAX_F__UNKNOWN (U64_MAX << 1)

/*
 * Single-bit values stored in the @bits field.  These indicate whether
 * the bit is set, or not.  The main _IAX_ bits set in the mask indicate
 * whether this value bit is populated by _get or stored by _set.
 */
#define SCOUTFS_IOC_IAX_B_RETENTION (1ULL << 0)

/*
 * x_mask bits which indicate which attributes of the inode to populate
 * on return for _get or to set on the inode for _set.  Each mask bit
 * corresponds to the matching named field in the attr_x struct passed
 * to the _get_ and _set_ calls.
 *
 * Each field can have different permissions or other attribute
 * requirements which can cause calls to fail.  If _set_ fails then no
 * other attribute changes will have been made by the same call.
 *
 * @SCOUTFS_IOC_IAX_RETENTION: Mark a file for retention.  When marked,
 * no modification can be made to the file other than changing extended
 * attributes outside the "user." prefix and clearing the retention
 * mark.  This can only be set on regular files and requires root (the
 * CAP_SYS_ADMIN capability).  Other attributes can be set with a
 * set_attr_x call on a retention inode as long as that call also
 * successfully clears the retention mark.
 */
#define SCOUTFS_IOC_IAX_META_SEQ (1ULL << 0)
#define SCOUTFS_IOC_IAX_DATA_SEQ (1ULL << 1)
#define SCOUTFS_IOC_IAX_DATA_VERSION (1ULL << 2)
#define SCOUTFS_IOC_IAX_ONLINE_BLOCKS (1ULL << 3)
#define SCOUTFS_IOC_IAX_OFFLINE_BLOCKS (1ULL << 4)
#define SCOUTFS_IOC_IAX_CTIME (1ULL << 5)
#define SCOUTFS_IOC_IAX_CRTIME (1ULL << 6)
#define SCOUTFS_IOC_IAX_SIZE (1ULL << 7)
#define SCOUTFS_IOC_IAX_RETENTION (1ULL << 8)
#define SCOUTFS_IOC_IAX_PROJECT_ID (1ULL << 9)

/* single bit attributes that are packed in the bits field as _B_ */
#define SCOUTFS_IOC_IAX__BITS (SCOUTFS_IOC_IAX_RETENTION)
/* inverse of all the bits we understand */
#define SCOUTFS_IOC_IAX__UNKNOWN (U64_MAX << 10)

#define SCOUTFS_IOC_GET_ATTR_X \
    _IOW(SCOUTFS_IOCTL_MAGIC, 18, struct scoutfs_ioctl_inode_attr_x)

#define SCOUTFS_IOC_SET_ATTR_X \
    _IOW(SCOUTFS_IOCTL_MAGIC, 19, struct scoutfs_ioctl_inode_attr_x)

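A hedged userspace sketch of driving the new attr_x interface: mark an open file for retention by setting the mask bit and the packed value bit together. The include path for the uapi header is an assumption, and error handling is trimmed to the minimum:

#include <sys/ioctl.h>
#include <stdio.h>
#include "scoutfs/ioctl.h"   /* header path is an assumption */

static int mark_retention(int fd)
{
    struct scoutfs_ioctl_inode_attr_x iax = {
        .x_mask = SCOUTFS_IOC_IAX_RETENTION,
        .bits = SCOUTFS_IOC_IAX_B_RETENTION,
    };

    /* requires CAP_SYS_ADMIN and a regular file, per the comment above */
    if (ioctl(fd, SCOUTFS_IOC_SET_ATTR_X, &iax) < 0) {
        perror("SCOUTFS_IOC_SET_ATTR_X");
        return -1;
    }
    return 0;
}
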
/*
 * (These fields are documented in the order that they're displayed by
 * the scoutfs cli utility, which matches the sort order of the rules.)
 *
 * @prio: The priority of the rule.  Rules are sorted by their fields
 * with prio at the highest magnitude.  When multiple rules match, the
 * rule with the highest sort order is enforced.  The priority field
 * lets rules override the default field sort order.
 *
 * @name_val[3]: The three 64bit values that make up the name of the
 * totl xattr whose total will be checked against the rule's limit to
 * see if the quota rule has been exceeded.  The behavior of the values
 * can be changed by their corresponding name_source and name_flags.
 *
 * @name_source[3]: The SQ_NS_ enums that control where the value comes
 * from.  _LITERAL uses the value from name_val.  Inode attribute
 * sources (_PROJ, _UID, _GID) are taken from the inode of the operation
 * that is being checked against the rule.
 *
 * @name_flags[3]: The SQ_NF_ enums that alter the name values.  _SELECT
 * makes the rule only match if the inode attribute of the operation
 * matches the attribute value stored in name_val.  This lets rules
 * match a specific value of an attribute rather than mapping all
 * attribute values to totl names.
 *
 * @op: The SQ_OP_ enums which specify the operation that can't exceed
 * the rule's limit.  _INODE checks inode creation and the inode
 * attributes are taken from the inode that would be created.  _DATA
 * checks file data block allocation and the inode fields come from the
 * inode that is allocating the blocks.
 *
 * @limit: The 64bit value that is checked against the totl value
 * described by the rule.  If the totl value is greater than or equal to
 * this value of the matching rule then the operation will return
 * -EDQUOT.
 *
 * @rule_flags: SQ_RF_TOTL_COUNT indicates that the rule's limit should
 * be checked against the number of xattrs contributing to a totl value
 * instead of the sum of the xattrs.
 */
struct scoutfs_ioctl_quota_rule {
    __u64 name_val[3];
    __u64 limit;
    __u8 prio;
    __u8 op;
    __u8 rule_flags;
    __u8 name_source[3];
    __u8 name_flags[3];
    __u8 _pad[7];
};

struct scoutfs_ioctl_get_quota_rules {
    __u64 iterator[2];
    __u64 rules_ptr;
    __u64 rules_nr;
};

/*
 * Rules are uniquely identified by their non-padded fields.  Addition will fail
 * with -EEXIST if the specified rule already exists and deletion must find a rule
 * with all matching fields to delete.
 */
#define SCOUTFS_IOC_GET_QUOTA_RULES \
    _IOR(SCOUTFS_IOCTL_MAGIC, 20, struct scoutfs_ioctl_get_quota_rules)
#define SCOUTFS_IOC_ADD_QUOTA_RULE \
    _IOW(SCOUTFS_IOCTL_MAGIC, 21, struct scoutfs_ioctl_quota_rule)
#define SCOUTFS_IOC_DEL_QUOTA_RULE \
    _IOW(SCOUTFS_IOCTL_MAGIC, 22, struct scoutfs_ioctl_quota_rule)

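A hedged sketch of adding one rule through the new ioctl: cap data block allocation for one specific uid, using _SELECT so the rule matches only that uid value. The field choices are an illustrative reading of the comment above, not taken from the change itself; the SQ_NS_/SQ_OP_/SQ_NF_ values come from the format.h hunk earlier, and the header path is an assumption:

#include <sys/ioctl.h>
#include <stdio.h>
#include "scoutfs/ioctl.h"   /* header path is an assumption */

static int add_uid_data_cap(int fd, unsigned long long uid, unsigned long long limit)
{
    struct scoutfs_ioctl_quota_rule rule = {
        .name_val = { uid, 0, 0 },            /* the uid to select on */
        .limit = limit,                       /* totl value that triggers -EDQUOT */
        .op = SQ_OP_DATA,                     /* checked at data block allocation */
        .name_source = { SQ_NS_UID, SQ_NS_LITERAL, SQ_NS_LITERAL },
        .name_flags = { SQ_NF_SELECT, 0, 0 }, /* match this uid only */
    };

    if (ioctl(fd, SCOUTFS_IOC_ADD_QUOTA_RULE, &rule) < 0) {
        perror("SCOUTFS_IOC_ADD_QUOTA_RULE");
        return -1;
    }
    return 0;
}
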
/*
 * Inodes can be indexed in a global key space at a position determined
 * by a .indx. tagged xattr.  The xattr name specifies the two index
 * position values, with major having the more significant comparison
 * order.
 */
struct scoutfs_ioctl_xattr_index_entry {
    __u64 minor;
    __u64 ino;
    __u8 major;
    __u8 _pad[7];
};

struct scoutfs_ioctl_read_xattr_index {
    __u64 flags;
    struct scoutfs_ioctl_xattr_index_entry first;
    struct scoutfs_ioctl_xattr_index_entry last;
    __u64 entries_ptr;
    __u64 entries_nr;
};

#define SCOUTFS_IOC_READ_XATTR_INDEX \
    _IOR(SCOUTFS_IOCTL_MAGIC, 23, struct scoutfs_ioctl_read_xattr_index)

#endif

281 kmod/src/item.c
@@ -24,6 +24,7 @@
#include "item.h"
#include "forest.h"
#include "block.h"
#include "msg.h"
#include "trans.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -85,6 +86,8 @@ struct item_cache_info {
    /* often walked, but per-cpu refs are fast path */
    rwlock_t rwlock;
    struct rb_root pg_root;
    /* stop readers from caching stale items behind reclaimed cleaned written items */
    u64 read_dirty_barrier;

    /* page-granular modification by writers, then exclusive to commit */
    spinlock_t dirty_lock;
@@ -95,10 +98,6 @@ struct item_cache_info {
    spinlock_t lru_lock;
    struct list_head lru_list;
    unsigned long lru_pages;

    /* written by page readers, read by shrink */
    spinlock_t active_lock;
    struct list_head active_list;
};

#define DECLARE_ITEM_CACHE_INFO(sb, name) \
@@ -1284,78 +1283,6 @@ static int cache_empty_page(struct super_block *sb,
    return 0;
}

/*
 * Readers operate independently from dirty items and transactions.
 * They read a set of persistent items and insert them into the cache
 * when there aren't already pages whose key range contains the items.
 * This naturally prefers cached dirty items over stale read items.
 *
 * We have to deal with the case where dirty items are written and
 * invalidated while a read is in flight.  The reader won't have seen
 * the items that were dirty in their persistent roots as they started
 * reading.  By the time they insert their read pages the previously
 * dirty items have been reclaimed and are not in the cache.  The old
 * stale items will be inserted in their place, effectively corrupting
 * the cache by having the dirty items disappear.
 *
 * We fix this by tracking the max seq of items in pages.  As readers
 * start they record the current transaction seq.  Invalidation skips
 * pages with a max seq greater than the first reader seq because the
 * items in the page have to stick around to prevent the readers' stale
 * items from being inserted.
 *
 * This naturally only affects a small set of pages with items that were
 * written relatively recently.  If we're in memory pressure then we
 * probably have a lot of pages and they'll naturally have items that
 * were visible to any readers.  We don't bother with the complicated and
 * expensive further refinement of tracking the ranges that are being
 * read and comparing those with pages to invalidate.
 */
struct active_reader {
    struct list_head head;
    u64 seq;
};

#define INIT_ACTIVE_READER(rdr) \
    struct active_reader rdr = { .head = LIST_HEAD_INIT(rdr.head) }

static void add_active_reader(struct super_block *sb, struct active_reader *active)
{
    DECLARE_ITEM_CACHE_INFO(sb, cinf);

    BUG_ON(!list_empty(&active->head));

    active->seq = scoutfs_trans_sample_seq(sb);

    spin_lock(&cinf->active_lock);
    list_add_tail(&active->head, &cinf->active_list);
    spin_unlock(&cinf->active_lock);
}

static u64 first_active_reader_seq(struct item_cache_info *cinf)
{
    struct active_reader *active;
    u64 first;

    /* only the calling task adds or deletes this active */
    spin_lock(&cinf->active_lock);
    active = list_first_entry_or_null(&cinf->active_list, struct active_reader, head);
    first = active ? active->seq : U64_MAX;
    spin_unlock(&cinf->active_lock);

    return first;
}

static void del_active_reader(struct item_cache_info *cinf, struct active_reader *active)
{
    /* only the calling task adds or deletes this active */
    if (!list_empty(&active->head)) {
        spin_lock(&cinf->active_lock);
        list_del_init(&active->head);
        spin_unlock(&cinf->active_lock);
    }
}

/*
 * Add a newly read item to the pages that we're assembling for
 * insertion into the cache.  These pages are private, they only exist
@@ -1449,24 +1376,34 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key, u64 s
 * and duplicates, we insert any resulting pages which don't overlap
 * with existing cached pages.
 *
 * We only insert uncached regions because this is called with cluster
 * locks held, but without locking the cache.  The regions we read can
 * be stale with respect to the current cache, which can be read and
 * dirtied by other cluster lock holders on our node, but the cluster
 * locks protect the stable items we read.  Invalidation is careful not
 * to drop pages that have items that we couldn't see because they were
 * dirty when we started reading.
 *
 * The forest item reader is reading stable trees that could be
 * overwritten.  It can return -ESTALE which we return to the caller who
 * will retry the operation and work with a new set of more recent
 * btrees.
 *
 * We only insert uncached regions because this is called with cluster
 * locks held, but without locking the cache.  The regions we read can
 * be stale with respect to the current cache, which can be read and
 * dirtied by other cluster lock holders on our node, but the cluster
 * locks protect the stable items we read.
 *
 * Using the presence of locally written dirty pages to override stale
 * read pages only works if, well, the more recent locally written pages
 * are still present.  Readers are totally decoupled from writers and
 * can have a set of items that is very old indeed.  In the meantime
 * more recent items would have been dirtied locally, committed,
 * cleaned, and reclaimed.  We have a coarse barrier which ensures that
 * readers can't insert items read from old roots from before local data
 * was written.  If a write completes while a read is in progress the
 * read will have to retry.  The retried read can use cached blocks so
 * we're relying on reads being much faster than writes to reduce the
 * overhead to mostly cpu work of recollecting the items from cached
 * blocks via a more recent root from the server.
 */
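Distilled from the comment above, the reader-side protocol is: sample the barrier, read from stable roots, then refuse to insert if a commit bumped the barrier in the meantime. A hedged pseudo-sketch of that ordering, not the literal diff (which follows):

/* sketch of the coarse read/write barrier ordering described above */
read_lock(&cinf->rwlock);
rdbar = cinf->read_dirty_barrier;   /* sample before reading roots */
read_unlock(&cinf->rwlock);

/* ... collect items from stable persistent btree roots ... */

write_lock(&cinf->rwlock);
if (cinf->read_dirty_barrier != rdbar)
    ret = -ESTALE;                  /* a commit cleaned items; caller retries */
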
static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
struct scoutfs_key *key, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct rb_root root = RB_ROOT;
|
||||
INIT_ACTIVE_READER(active);
|
||||
struct cached_page *right = NULL;
|
||||
struct cached_page *pg;
|
||||
struct cached_page *rd;
|
||||
@@ -1479,6 +1416,7 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
struct rb_node *par;
|
||||
struct rb_node *pg_tmp;
|
||||
struct rb_node *item_tmp;
|
||||
u64 rdbar;
|
||||
int pgi;
|
||||
int ret;
|
||||
|
||||
@@ -1492,8 +1430,9 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
pg->end = lock->end;
|
||||
rbtree_insert(&pg->node, NULL, &root.rb_node, &root);
|
||||
|
||||
/* set active reader seq before reading persistent roots */
|
||||
add_active_reader(sb, &active);
|
||||
read_lock(&cinf->rwlock);
|
||||
rdbar = cinf->read_dirty_barrier;
|
||||
read_unlock(&cinf->rwlock);
|
||||
|
||||
start = lock->start;
|
||||
end = lock->end;
|
||||
@@ -1532,6 +1471,13 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
retry:
|
||||
write_lock(&cinf->rwlock);
|
||||
|
||||
/* can't insert if write has cleaned since we read */
|
||||
if (cinf->read_dirty_barrier != rdbar) {
|
||||
scoutfs_inc_counter(sb, item_read_pages_barrier);
|
||||
ret = -ESTALE;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
while ((rd = first_page(&root))) {
|
||||
|
||||
pg = page_rbtree_walk(sb, &cinf->pg_root, &rd->start, &rd->end,
|
||||
@@ -1569,12 +1515,12 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
|
||||
unlock:
|
||||
write_unlock(&cinf->rwlock);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
del_active_reader(cinf, &active);
|
||||
|
||||
/* free any pages we left dangling on error */
|
||||
for_each_page_safe(&root, rd, pg_tmp) {
|
||||
rbtree_erase(&rd->node, &root);
|
||||
@@ -1634,6 +1580,7 @@ retry:
|
||||
ret = read_pages(sb, cinf, key, lock);
|
||||
if (ret < 0 && ret != -ESTALE)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, item_read_pages_retry);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@@ -1670,13 +1617,24 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lock_safe(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
static int lock_safe(struct super_block *sb, struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
int mode)
|
||||
{
|
||||
if (WARN_ON_ONCE(!scoutfs_lock_protected(lock, key, mode)))
|
||||
bool prot = scoutfs_lock_protected(lock, key, mode);
|
||||
|
||||
if (!prot) {
|
||||
static bool once = false;
|
||||
if (!once) {
|
||||
scoutfs_err(sb, "lock (start "SK_FMT" end "SK_FMT" mode 0x%x) does not protect operation (key "SK_FMT" mode 0x%x)",
|
||||
SK_ARG(&lock->start), SK_ARG(&lock->end), lock->mode,
|
||||
SK_ARG(key), mode);
|
||||
dump_stack();
|
||||
once = true;
|
||||
}
|
||||
return -EINVAL;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int optional_lock_mode_match(struct scoutfs_lock *lock, int mode)
|
||||
@@ -1708,8 +1666,8 @@ static int copy_val(void *dst, int dst_len, void *src, int src_len)
|
||||
* The amount of bytes copied is returned which can be 0 or truncated if
|
||||
* the caller's buffer isn't big enough.
|
||||
*/
|
||||
int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
static int item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, int len_limit, struct scoutfs_lock *lock)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
struct cached_item *item;
|
||||
@@ -1718,7 +1676,7 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_lookup);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
|
||||
goto out;
|
||||
|
||||
ret = get_cached_page(sb, cinf, lock, key, false, false, 0, &pg);
|
||||
@@ -1729,6 +1687,8 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
item = item_rbtree_walk(&pg->item_root, key, NULL, NULL, NULL);
|
||||
if (!item || item->deletion)
|
||||
ret = -ENOENT;
|
||||
else if (len_limit > 0 && item->val_len > len_limit)
|
||||
ret = -EIO;
|
||||
else
|
||||
ret = copy_val(val, val_len, item->val, item->val_len);
|
||||
|
||||
@@ -1737,13 +1697,38 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
return item_lookup(sb, key, val, val_len, 0, lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy an item's value into the caller's buffer. If the item's value
|
||||
* is larger than the caller's buffer then -EIO is returned. If the
|
||||
* item is smaller then the bytes from the end of the copied value to
|
||||
* the end of the buffer are zeroed. The number of value bytes copied
|
||||
* is returned, and 0 can be returned for an item with no value.
|
||||
*/
|
||||
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = item_lookup(sb, key, val, val_len, val_len, lock);
|
||||
if (ret >= 0 && ret < val_len)
|
||||
memset(val + ret, 0, val_len - ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_item_lookup(sb, key, val, val_len, lock);
|
||||
ret = item_lookup(sb, key, val, val_len, 0, lock);
|
||||
if (ret == val_len)
|
||||
ret = 0;
|
||||
else if (ret >= 0)
|
||||
@@ -1793,7 +1778,7 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key *key,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
|
||||
goto out;
|
||||
|
||||
pos = *key;
|
||||
@@ -1874,7 +1859,7 @@ int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_dirty);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -1920,7 +1905,7 @@ static int item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_create);
|
||||
|
||||
if ((ret = lock_safe(lock, key, mode)) ||
|
||||
if ((ret = lock_safe(sb, lock, key, mode)) ||
|
||||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
@@ -1963,7 +1948,7 @@ int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
return item_create(sb, key, val, val_len, lock, NULL,
|
||||
SCOUTFS_LOCK_READ, false);
|
||||
SCOUTFS_LOCK_WRITE, false);
|
||||
}
|
||||
|
||||
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
|
||||
@@ -1994,7 +1979,7 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_update);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -2062,7 +2047,7 @@ int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_delta);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -2135,7 +2120,7 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_delete);
|
||||
|
||||
if ((ret = lock_safe(lock, key, mode)) ||
|
||||
if ((ret = lock_safe(sb, lock, key, mode)) ||
|
||||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
@@ -2202,18 +2187,18 @@ u64 scoutfs_item_dirty_pages(struct super_block *sb)
	return (u64)atomic_read(&cinf->dirty_pages);
}

static int cmp_pg_start(void *priv, struct list_head *A, struct list_head *B)
static int cmp_pg_start(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
{
	struct cached_page *a = list_entry(A, struct cached_page, dirty_head);
	struct cached_page *b = list_entry(B, struct cached_page, dirty_head);
	KC_LIST_CMP_CONST struct cached_page *a = list_entry(A, KC_LIST_CMP_CONST struct cached_page, dirty_head);
	KC_LIST_CMP_CONST struct cached_page *b = list_entry(B, KC_LIST_CMP_CONST struct cached_page, dirty_head);

	return scoutfs_key_compare(&a->start, &b->start);
}

static int cmp_item_key(void *priv, struct list_head *A, struct list_head *B)
static int cmp_item_key(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
{
	struct cached_item *a = list_entry(A, struct cached_item, dirty_head);
	struct cached_item *b = list_entry(B, struct cached_item, dirty_head);
	KC_LIST_CMP_CONST struct cached_item *a = list_entry(A, KC_LIST_CMP_CONST struct cached_item, dirty_head);
	KC_LIST_CMP_CONST struct cached_item *b = list_entry(B, KC_LIST_CMP_CONST struct cached_item, dirty_head);

	return scoutfs_key_compare(&a->key, &b->key);
}
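The cmp signature change above tracks newer kernels, where list_sort() declares its compare callback with const-qualified list_head arguments. As a rough sketch of how the compat macro (defined later in this series) absorbs that, with a hypothetical cmp_example callback rather than scoutfs code:

	/* expands to "const" on kernels whose list_sort() cmp takes const args */
	#ifdef KC_LIST_CMP_CONST_ARG_LIST_HEAD
	#define KC_LIST_CMP_CONST const
	#else
	#define KC_LIST_CMP_CONST
	#endif

	static int cmp_example(void *priv, KC_LIST_CMP_CONST struct list_head *a,
			       KC_LIST_CMP_CONST struct list_head *b)
	{
		/* negative/zero/positive, like the scoutfs_key_compare() calls above */
		return 0;
	}

	/* usage: list_sort(NULL, &some_list, cmp_example); */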
@@ -2362,6 +2347,12 @@ out:
 * The caller has successfully committed all the dirty btree blocks that
 * contained the currently dirty items.  Clear all the dirty items and
 * pages.
 *
 * This strange lock/trylock loop comes from sparse issuing spurious
 * mismatched context warnings if we do anything (like unlock and relax)
 * in the else branch of the failed trylock.  We're jumping through
 * hoops to not use the else but still drop and reacquire the dirty_lock
 * if the trylock fails.
 */
int scoutfs_item_write_done(struct super_block *sb)
{
@@ -2370,40 +2361,35 @@ int scoutfs_item_write_done(struct super_block *sb)
	struct cached_item *tmp;
	struct cached_page *pg;

retry:
	/* don't let read_pages miss written+cleaned items */
	write_lock(&cinf->rwlock);
	cinf->read_dirty_barrier++;
	write_unlock(&cinf->rwlock);

	spin_lock(&cinf->dirty_lock);

	while ((pg = list_first_entry_or_null(&cinf->dirty_list,
					      struct cached_page,
					      dirty_head))) {

		if (!write_trylock(&pg->rwlock)) {
	while ((pg = list_first_entry_or_null(&cinf->dirty_list, struct cached_page, dirty_head))) {
		if (write_trylock(&pg->rwlock)) {
			spin_unlock(&cinf->dirty_lock);
			cpu_relax();
			goto retry;
		}
		list_for_each_entry_safe(item, tmp, &pg->dirty_list,
					 dirty_head) {
			clear_item_dirty(sb, cinf, pg, item);

			if (item->delta)
				scoutfs_inc_counter(sb, item_delta_written);

			/* free deletion items */
			if (item->deletion || item->delta)
				erase_item(pg, item);
			else
				item->persistent = 1;
		}

		write_unlock(&pg->rwlock);
		spin_lock(&cinf->dirty_lock);
	}
	spin_unlock(&cinf->dirty_lock);

		list_for_each_entry_safe(item, tmp, &pg->dirty_list,
					 dirty_head) {
			clear_item_dirty(sb, cinf, pg, item);

			if (item->delta)
				scoutfs_inc_counter(sb, item_delta_written);

			/* free deletion items */
			if (item->deletion || item->delta)
				erase_item(pg, item);
			else
				item->persistent = 1;
		}

		write_unlock(&pg->rwlock);

		spin_lock(&cinf->dirty_lock);
	}

	} while (pg);
	spin_unlock(&cinf->dirty_lock);

	return 0;
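The shape described by the new comment is easier to see stripped down. A minimal sketch of the pattern with hypothetical names (entry, pending, list_lock), not the scoutfs code itself:

	/* drain a list where each entry needs a trylock, with no else branch
	 * on the failed trylock (sparse emits spurious context warnings) */
	spin_lock(&list_lock);
	while ((e = list_first_entry_or_null(&pending, struct entry, head))) {
		if (spin_trylock(&e->lock)) {
			list_del_init(&e->head);
			spin_unlock(&e->lock);
		}
		/* unconditionally drop and retake the list lock instead of
		 * using an else; a holder of e->lock can now make progress */
		spin_unlock(&list_lock);
		spin_lock(&list_lock);
	}
	spin_unlock(&list_lock);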
@@ -2558,24 +2544,15 @@ static unsigned long item_cache_scan_objects(struct shrinker *shrink,
	struct cached_page *tmp;
	struct cached_page *pg;
	unsigned long freed = 0;
	u64 first_reader_seq;
	int nr = sc->nr_to_scan;

	scoutfs_inc_counter(sb, item_cache_scan_objects);

	/* can't invalidate pages with items that weren't visible to first reader */
	first_reader_seq = first_active_reader_seq(cinf);

	write_lock(&cinf->rwlock);
	spin_lock(&cinf->lru_lock);

	list_for_each_entry_safe(pg, tmp, &cinf->lru_list, lru_head) {

		if (first_reader_seq <= pg->max_seq) {
			scoutfs_inc_counter(sb, item_shrink_page_reader);
			continue;
		}

		if (!write_trylock(&pg->rwlock)) {
			scoutfs_inc_counter(sb, item_shrink_page_trylock);
			continue;
@@ -2642,8 +2619,6 @@ int scoutfs_item_setup(struct super_block *sb)
	atomic_set(&cinf->dirty_pages, 0);
	spin_lock_init(&cinf->lru_lock);
	INIT_LIST_HEAD(&cinf->lru_list);
	spin_lock_init(&cinf->active_lock);
	INIT_LIST_HEAD(&cinf->active_list);

	cinf->pcpu_pages = alloc_percpu(struct item_percpu_pages);
	if (!cinf->pcpu_pages)

@@ -2654,7 +2629,7 @@ int scoutfs_item_setup(struct super_block *sb)

	KC_INIT_SHRINKER_FUNCS(&cinf->shrinker, item_cache_count_objects,
			       item_cache_scan_objects);
	KC_REGISTER_SHRINKER(&cinf->shrinker);
	KC_REGISTER_SHRINKER(&cinf->shrinker, "scoutfs-item:" SCSBF, SCSB_ARGS(sb));
#ifdef KC_CPU_NOTIFIER
	cinf->notifier.notifier_call = item_cpu_callback;
	register_hotcpu_notifier(&cinf->notifier);

@@ -2676,8 +2651,6 @@ void scoutfs_item_destroy(struct super_block *sb)
	int cpu;

	if (cinf) {
		BUG_ON(!list_empty(&cinf->active_list));

#ifdef KC_CPU_NOTIFIER
		unregister_hotcpu_notifier(&cinf->notifier);
#endif
@@ -3,6 +3,8 @@

int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
				     void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
			      void *val, int val_len,
			      struct scoutfs_lock *lock);
@@ -67,12 +67,11 @@ kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, loff_t *ppos,
			       size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, WRITE, iov, nr_segs, count);
	status = generic_perform_write(file, &i, pos);
	status = kc_generic_perform_write(iocb, &i, pos);

	if (likely(status >= 0)) {
		written += status;
@@ -82,3 +81,69 @@ kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
	return written ? written : status;
}
#endif

#include <linux/list_lru.h>

#ifdef KC_LIST_LRU_WALK_CB_ITEM_LOCK
static enum lru_status kc_isolate(struct list_head *item, spinlock_t *lock, void *cb_arg)
{
	struct kc_isolate_args *args = cb_arg;

	/* isolate doesn't use list, nr_items updated in caller */
	return args->isolate(item, NULL, args->cb_arg);
}

unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
}

unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
}
#endif

#ifdef KC_LIST_LRU_WALK_CB_LIST_LOCK
static enum lru_status kc_isolate(struct list_head *item, struct list_lru_one *list,
				  spinlock_t *lock, void *cb_arg)
{
	struct kc_isolate_args *args = cb_arg;

	return args->isolate(item, list, args->cb_arg);
}

unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
}

unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg)
{
	struct kc_isolate_args args = {
		.isolate = isolate,
		.cb_arg = cb_arg,
	};

	return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
}

#endif
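One way these wrappers get used, sketched with a hypothetical cache type (the struct, names, and LRU_SKIP policy here are illustrative assumptions, not scoutfs code):

	struct demo_cache {
		struct list_lru lru;
		struct shrinker shrinker;
	};

	static enum lru_status demo_isolate(struct list_head *item,
					    struct list_lru_one *list, void *cb_arg)
	{
		return LRU_SKIP;	/* a real callback would try to free the item */
	}

	static unsigned long demo_scan(struct shrinker *shrink, struct shrink_control *sc)
	{
		struct demo_cache *cache = KC_SHRINKER_CONTAINER_OF(shrink, struct demo_cache);

		/* the same callback signature works on every supported kernel */
		return kc_list_lru_shrink_walk(&cache->lru, sc, demo_isolate, cache);
	}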
@@ -29,50 +29,6 @@ do { \
})
#endif

#ifndef KC_ITERATE_DIR_CONTEXT
typedef filldir_t kc_readdir_ctx_t;
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, dirent, ctx)
#define KC_FOP_READDIR readdir
#define kc_readdir_pos(filp, ctx) (filp)->f_pos
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, dirent, ctx)
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
	(ctx(dirent, name, name_len, pos, ino, dt) == 0)
#else
typedef struct dir_context * kc_readdir_ctx_t;
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, ctx)
#define KC_FOP_READDIR iterate
#define kc_readdir_pos(filp, ctx) (ctx)->pos
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, ctx)
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
	dir_emit(ctx, name, name_len, ino, dt)
#endif

#ifndef KC_DIR_EMIT_DOTS
/*
 * Kernels before ->iterate don't have dir_emit_dots, so we give them
 * one that works with the ->readdir() filldir() method.
 */
static inline int dir_emit_dots(struct file *file, void *dirent,
				filldir_t filldir)
{
	if (file->f_pos == 0) {
		if (filldir(dirent, ".", 1, 1,
			    file->f_path.dentry->d_inode->i_ino, DT_DIR))
			return 0;
		file->f_pos = 1;
	}

	if (file->f_pos == 1) {
		if (filldir(dirent, "..", 2, 1,
			    parent_ino(file->f_path.dentry), DT_DIR))
			return 0;
		file->f_pos = 2;
	}

	return 1;
}
#endif

#ifdef KC_POSIX_ACL_VALID_USER_NS
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(user_ns, acl)
#else
@@ -197,7 +153,11 @@ struct timespec64 kc_current_time(struct inode *inode);
} while (0)

#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(ptr, type, shrinker)
#define KC_REGISTER_SHRINKER(ptr) (register_shrinker(ptr))
#ifdef KC_SHRINKER_NAME
#define KC_REGISTER_SHRINKER register_shrinker
#else
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr))
#endif /* KC_SHRINKER_NAME */
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr))
#define KC_SHRINKER_FN(ptr) (ptr)
#else
@@ -224,7 +184,7 @@ struct kc_shrinker_wrapper {
	_wrap->shrink.seeks = DEFAULT_SEEKS; \
} while (0)
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(container_of(ptr, struct kc_shrinker_wrapper, shrink), type, shrinker)
#define KC_REGISTER_SHRINKER(ptr) (register_shrinker(ptr.shrink))
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr.shrink))
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr.shrink))
#define KC_SHRINKER_FN(ptr) (ptr.shrink)
@@ -271,6 +231,262 @@ ssize_t kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *i
				       unsigned long nr_segs, loff_t pos, loff_t *ppos,
				       size_t count, ssize_t written);
#define generic_file_buffered_write kc_generic_file_buffered_write
#ifdef KC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	iocb->ki_pos = pos;
	return generic_perform_write(iocb, iter);
}
#else
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	return generic_perform_write(file, iter, pos);
}
#endif
#endif // KC_GENERIC_FILE_BUFFERED_WRITE

#ifndef KC_HAVE_BLK_OPF_T
/* typedef __u32 __bitwise blk_opf_t; */
typedef unsigned int blk_opf_t;
#endif

#ifdef KC_LIST_CMP_CONST_ARG_LIST_HEAD
#define KC_LIST_CMP_CONST const
#else
#define KC_LIST_CMP_CONST
#endif

#ifdef KC_VMALLOC_PGPROT_T
#define kc__vmalloc(size, gfp_mask) __vmalloc(size, gfp_mask, PAGE_KERNEL)
#else
#define kc__vmalloc __vmalloc
#endif

#ifdef KC_VFS_METHOD_MNT_IDMAP_ARG
#define KC_VFS_NS_DEF struct mnt_idmap *mnt_idmap,
#define KC_VFS_NS mnt_idmap,
#define KC_VFS_INIT_NS &nop_mnt_idmap,
#else
#ifdef KC_VFS_METHOD_USER_NAMESPACE_ARG
#define KC_VFS_NS_DEF struct user_namespace *mnt_user_ns,
#define KC_VFS_NS mnt_user_ns,
#define KC_VFS_INIT_NS &init_user_ns,
#else
#define KC_VFS_NS_DEF
#define KC_VFS_NS
#define KC_VFS_INIT_NS
#endif
#endif /* KC_VFS_METHOD_MNT_IDMAP_ARG */

#ifdef KC_BIO_ALLOC_DEV_OPF_ARGS
#define kc_bio_alloc bio_alloc
#else
#include <linux/bio.h>
static inline struct bio *kc_bio_alloc(struct block_device *bdev, unsigned short nr_vecs,
				       blk_opf_t opf, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc(gfp_mask, nr_vecs);
	if (b) {
		kc_bio_set_opf(b, opf);
		bio_set_dev(b, bdev);
	}
	return b;
}
#endif

#ifndef KC_FIEMAP_PREP
#define fiemap_prep(inode, fieinfo, start, len, flags) fiemap_check_flags(fieinfo, flags)
#endif

#ifndef KC_KERNEL_OLD_TIMEVAL_STRUCT
#define __kernel_old_timeval timeval
#define ns_to_kernel_old_timeval(ktime) ns_to_timeval(ktime.tv64)
#endif

#ifdef KC_SOCK_SET_SNDTIMEO
#include <net/sock.h>
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
{
	sock_set_sndtimeo(sock->sk, secs);
	return 0;
}
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
{
	struct __kernel_old_timeval tv;
	sockptr_t kopt;

	tv = ns_to_kernel_old_timeval(to);

	kopt = KERNEL_SOCKPTR(&tv);

	return sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO_NEW,
			       kopt, sizeof(tv));
}
#else
#include <net/sock.h>
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
{
	struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };
	return kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
				 (char *)&tv, sizeof(tv));
}
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
{
	struct __kernel_old_timeval tv;

	tv = ns_to_kernel_old_timeval(to);
	return kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				 (char *)&tv, sizeof(tv));
}
#endif

#ifdef KC_SETSOCKOPT_SOCKPTR_T
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
{
	sockptr_t kopt = KERNEL_SOCKPTR(optval);
	return sock_setsockopt(sock, level, op, kopt, sizeof(optval));
}
#else
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
{
	return kernel_setsockopt(sock, level, op, (char *)optval, sizeof(optval));
}
#endif

#ifdef KC_HAVE_TCP_SET_SOCKFN
#include <linux/net.h>
#include <net/tcp.h>
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
{
	return tcp_sock_set_keepintvl(sock->sk, val);
}
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
{
	return tcp_sock_set_keepidle(sock->sk, val);
}
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
{
	tcp_sock_set_user_timeout(sock->sk, val);
	return 0;
}
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
{
	tcp_sock_set_nodelay(sock->sk);
	return 0;
}
#else
#include <linux/net.h>
#include <net/tcp.h>
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
{
	int optval = val;
	return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, (char *)&optval, sizeof(optval));
}
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
{
	int optval = 1;
	return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
}
#endif

#ifdef KC_INODE_DIO_END
#define kc_inode_dio_end inode_dio_end
#else
#define kc_inode_dio_end inode_dio_done
#endif

#ifndef KC_MM_VM_FAULT_T
typedef unsigned int vm_fault_t;
static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
#endif

#include <linux/list_lru.h>

#ifndef KC_LIST_LRU_SHRINK_COUNT_WALK
/* we don't bother with sc->{nid,memcg} (which doesn't exist in oldest kernels) */
static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count(lru);
}
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk(lru, isolate, cb_arg, sc->nr_to_scan);
}
#endif

#ifndef KC_LIST_LRU_ADD_OBJ
#define list_lru_add_obj list_lru_add
#define list_lru_del_obj list_lru_del
#endif

#if defined(KC_LIST_LRU_WALK_CB_LIST_LOCK) || defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
struct list_lru_one;
typedef enum lru_status (*kc_list_lru_walk_cb_t)(struct list_head *item, struct list_lru_one *list,
						 void *cb_arg);
struct kc_isolate_args {
	kc_list_lru_walk_cb_t isolate;
	void *cb_arg;
};
unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
			       unsigned long nr_to_walk);
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
				      kc_list_lru_walk_cb_t isolate, void *cb_arg);
#else
#define kc_list_lru_shrink_walk list_lru_shrink_walk
#endif

#if defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
/* isolate moved by hand, nr_items updated in walk as _REMOVE returned */
static inline void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
					 struct list_head *head)
{
	list_move(item, head);
}
#endif

#ifndef KC_STACK_TRACE_SAVE
#include <linux/stacktrace.h>
static inline unsigned int stack_trace_save(unsigned long *store, unsigned int size,
					    unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries = store,
		.max_entries = size,
		.skip = skipnr,
	};

	save_stack_trace(&trace);
	return trace.nr_entries;
}

static inline void stack_trace_print(unsigned long *entries, unsigned int nr_entries, int spaces)
{
	struct stack_trace trace = {
		.entries = entries,
		.nr_entries = nr_entries,
	};

	print_stack_trace(&trace, spaces);
}
#endif

#endif
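As a usage sketch of the KC_VFS_NS* macros above, a hypothetical inode operation (demo_setattr is an assumed name, not a scoutfs function) builds against the idmapped-mount, user-namespace, and older signatures alike:

	static int demo_setattr(KC_VFS_NS_DEF struct dentry *dentry, struct iattr *attr)
	{
		/* the body never touches the namespace argument directly */
		return 0;
	}

	/* internal callers supply the matching first argument, or nothing:
	 *	demo_setattr(KC_VFS_INIT_NS dentry, attr);
	 */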
@@ -125,8 +125,8 @@ static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
 * other alternatives across keys that first differ in any of the
 * values.  Say maybe 20% faster than memcmp.
 */
static inline int scoutfs_key_compare(struct scoutfs_key *a,
				      struct scoutfs_key *b)
static inline int scoutfs_key_compare(const struct scoutfs_key *a,
				      const struct scoutfs_key *b)
{
	return scoutfs_cmp(a->sk_zone, b->sk_zone) ?:
	       scoutfs_cmp(le64_to_cpu(a->_sk_first), le64_to_cpu(b->_sk_first)) ?:
@@ -142,10 +142,10 @@ static inline int scoutfs_key_compare(struct scoutfs_key *a,
 *  1: a_start > b_end
 *  else 0: ranges overlap
 */
static inline int scoutfs_key_compare_ranges(struct scoutfs_key *a_start,
					     struct scoutfs_key *a_end,
					     struct scoutfs_key *b_start,
					     struct scoutfs_key *b_end)
static inline int scoutfs_key_compare_ranges(const struct scoutfs_key *a_start,
					     const struct scoutfs_key *a_end,
					     const struct scoutfs_key *b_start,
					     const struct scoutfs_key *b_end)
{
	return scoutfs_key_compare(a_end, b_start) < 0 ? -1 :
	       scoutfs_key_compare(a_start, b_end) > 0 ? 1 :
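The three outcomes are easiest to check with ordered keys. Assuming hypothetical keys k1 < k2 < k3 < k4:

	/* a = [k1,k2] entirely before b = [k3,k4] */
	scoutfs_key_compare_ranges(&k1, &k2, &k3, &k4);	/* -1 */
	/* a = [k3,k4] entirely after b = [k1,k2] */
	scoutfs_key_compare_ranges(&k3, &k4, &k1, &k2);	/*  1 */
	/* a = [k1,k3] overlaps b = [k2,k4] */
	scoutfs_key_compare_ranges(&k1, &k3, &k2, &k4);	/*  0 */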
@@ -36,6 +36,8 @@
#include "item.h"
#include "omap.h"
#include "util.h"
#include "totl.h"
#include "quota.h"

/*
 * scoutfs uses a lock service to manage item cache consistency between
@@ -166,7 +168,6 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
			   enum scoutfs_lock_mode prev, enum scoutfs_lock_mode mode)
{
	struct scoutfs_lock_coverage *cov;
	struct scoutfs_lock_coverage *tmp;
	u64 ino, last;
	int ret = 0;
@@ -185,21 +186,27 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
		return ret;
	}

	if (lock->start.sk_zone == SCOUTFS_QUOTA_ZONE && !lock_mode_can_read(mode))
		scoutfs_quota_invalidate(sb);

	/* have to invalidate if we're not in the only usable case */
	if (!(prev == SCOUTFS_LOCK_WRITE && mode == SCOUTFS_LOCK_READ)) {
retry:
		/* remove cov items to tell users that their cache is stale */
		/*
		 * Remove cov items to tell users that their cache is
		 * stale.  The unlock pattern comes from avoiding bad
		 * sparse warnings when taking else in a failed trylock.
		 */
		spin_lock(&lock->cov_list_lock);
		list_for_each_entry_safe(cov, tmp, &lock->cov_list, head) {
			if (!spin_trylock(&cov->cov_lock)) {
				spin_unlock(&lock->cov_list_lock);
				cpu_relax();
				goto retry;
		while ((cov = list_first_entry_or_null(&lock->cov_list,
						       struct scoutfs_lock_coverage, head))) {
			if (spin_trylock(&cov->cov_lock)) {
				list_del_init(&cov->head);
				cov->lock = NULL;
				spin_unlock(&cov->cov_lock);
				scoutfs_inc_counter(sb, lock_invalidate_coverage);
			}
			list_del_init(&cov->head);
			cov->lock = NULL;
			spin_unlock(&cov->cov_lock);
			scoutfs_inc_counter(sb, lock_invalidate_coverage);
			spin_unlock(&lock->cov_list_lock);
			spin_lock(&lock->cov_list_lock);
		}
		spin_unlock(&lock->cov_list_lock);
@@ -297,6 +304,7 @@ static void lock_inc_count(unsigned int *counts, enum scoutfs_lock_mode mode)
static void lock_dec_count(unsigned int *counts, enum scoutfs_lock_mode mode)
{
	BUG_ON(mode < 0 || mode >= SCOUTFS_LOCK_NR_MODES);
	BUG_ON(counts[mode] == 0);
	counts[mode]--;
}
@@ -1244,10 +1252,29 @@ int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode,
	struct scoutfs_key start;
	struct scoutfs_key end;

	scoutfs_key_set_zeros(&start);
	start.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
	scoutfs_key_set_ones(&end);
	end.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
	scoutfs_totl_set_range(&start, &end);

	return lock_key_range(sb, mode, flags, &start, &end, lock);
}

int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
			    struct scoutfs_lock **lock)
{
	struct scoutfs_key start;
	struct scoutfs_key end;

	scoutfs_xattr_indx_get_range(&start, &end);

	return lock_key_range(sb, mode, flags, &start, &end, lock);
}

int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
		       struct scoutfs_lock **lock)
{
	struct scoutfs_key start;
	struct scoutfs_key end;

	scoutfs_quota_get_lock_range(&start, &end);

	return lock_key_range(sb, mode, flags, &start, &end, lock);
}
@@ -1708,7 +1735,7 @@ int scoutfs_lock_setup(struct super_block *sb)
	linfo->lock_range_tree = RB_ROOT;
	KC_INIT_SHRINKER_FUNCS(&linfo->shrinker, lock_count_objects,
			       lock_scan_objects);
	KC_REGISTER_SHRINKER(&linfo->shrinker);
	KC_REGISTER_SHRINKER(&linfo->shrinker, "scoutfs-lock:" SCSBF, SCSB_ARGS(sb));
	INIT_LIST_HEAD(&linfo->lru_list);
	INIT_WORK(&linfo->inv_work, lock_invalidate_worker);
	INIT_LIST_HEAD(&linfo->inv_list);
@@ -86,6 +86,10 @@ int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int
			u64 ino, struct scoutfs_lock **lock);
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
			    struct scoutfs_lock **lock);
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
			    struct scoutfs_lock **lock);
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
		       struct scoutfs_lock **lock);
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock,
		    enum scoutfs_lock_mode mode);
@@ -202,21 +202,48 @@ static u8 invalidation_mode(u8 granted, u8 requested)

/*
 * Return true if the client lock instances described by the entries can
 * be granted at the same time.  Typically this only means they're both
 * modes that are compatible between nodes.  In addition there's the
 * special case where a read lock on a client is compatible with a write
 * lock on the same client because the client's cache covered by the
 * read lock is still valid if they get a write lock.
 * be granted at the same time.  There's only three cases where this is
 * true.
 *
 * First, the two locks are both of the same mode that allows full
 * sharing -- read and write only.  The only point of these modes is
 * that everyone can share them.
 *
 * Second, a write lock gives the client permission to read as well.
 * This means that a client can upgrade its read lock to a write lock
 * without having to invalidate the existing read and drop caches.
 *
 * Third, null locks are always compatible between clients.  It's as
 * though the client with the null lock has no lock at all.  But it's
 * never compatible with all locks on the client requesting null.
 * Sending invalidations for existing locks on a client when we get a
 * null request is how we resolve races in shrinking locks -- we turn it
 * into the unsolicited remote invalidation case.
 *
 * All other mode and client combinations can not be shared, most
 * typically a write lock invalidating all other non-write holders to
 * drop caches and force a read after the write has completed.
 */
static bool client_entries_compatible(struct client_lock_entry *granted,
				      struct client_lock_entry *requested)
{
	return (granted->mode == requested->mode &&
		(granted->mode == SCOUTFS_LOCK_READ ||
		 granted->mode == SCOUTFS_LOCK_WRITE_ONLY)) ||
	       (granted->rid == requested->rid &&
		granted->mode == SCOUTFS_LOCK_READ &&
		requested->mode == SCOUTFS_LOCK_WRITE);
	/* only read and write_only can be full shared */
	if ((granted->mode == requested->mode) &&
	    (granted->mode == SCOUTFS_LOCK_READ || granted->mode == SCOUTFS_LOCK_WRITE_ONLY))
		return true;

	/* _write includes reading, so a client can upgrade its read to write */
	if (granted->rid == requested->rid &&
	    granted->mode == SCOUTFS_LOCK_READ &&
	    requested->mode == SCOUTFS_LOCK_WRITE)
		return true;

	/* null is always compatible across clients, never within a client */
	if ((granted->rid != requested->rid) &&
	    (granted->mode == SCOUTFS_LOCK_NULL || requested->mode == SCOUTFS_LOCK_NULL))
		return true;

	return false;
}
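The rewritten predicate's cases can be summarized as a table (rid identifies the client; this summary is derived from the code above, not from separate documentation):

	/*
	 *   granted     requested   same client?   compatible?
	 *   READ        READ        either         yes, fully shared mode
	 *   WRITE_ONLY  WRITE_ONLY  either         yes, fully shared mode
	 *   READ        WRITE       yes            yes, read upgrades to write
	 *   NULL        any         no             yes, as if unlocked
	 *   any         NULL        no             yes
	 *   everything else                        no
	 */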
/*
@@ -317,16 +344,18 @@ static void put_server_lock(struct lock_server_info *inf,

	BUG_ON(!mutex_is_locked(&snode->mutex));

	spin_lock(&inf->lock);

	if (atomic_dec_and_test(&snode->refcount) &&
	    list_empty(&snode->granted) &&
	    list_empty(&snode->requested) &&
	    list_empty(&snode->invalidated)) {
		spin_lock(&inf->lock);
		rb_erase(&snode->node, &inf->locks_root);
		spin_unlock(&inf->lock);
		should_free = true;
	}

	spin_unlock(&inf->lock);

	mutex_unlock(&snode->mutex);

	if (should_free) {
kmod/src/net.c (534 changed lines)
@@ -20,6 +20,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/log2.h>
#include <linux/jhash.h>

#include "format.h"
#include "counters.h"

@@ -31,6 +32,7 @@
#include "endian_swap.h"
#include "tseq.h"
#include "fence.h"
#include "options.h"

/*
 * scoutfs networking delivers requests and responses between nodes.

@@ -134,6 +136,7 @@ struct message_send {
struct message_recv {
	struct scoutfs_tseq_entry tseq_entry;
	struct work_struct proc_work;
	struct list_head ordered_head;
	struct scoutfs_net_connection *conn;
	struct scoutfs_net_header nh;
};

@@ -332,7 +335,7 @@ static int submit_send(struct super_block *sb,
		return -EINVAL;

	if (scoutfs_forcing_unmount(sb))
		return -EIO;
		return -ENOLINK;

	msend = kmalloc(offsetof(struct message_send,
				 nh.data[data_len]), GFP_NOFS);
@@ -498,16 +501,61 @@ static void scoutfs_net_proc_worker(struct work_struct *work)
	trace_scoutfs_net_proc_work_exit(sb, 0, ret);
}

static void scoutfs_net_ordered_proc_worker(struct work_struct *work)
{
	struct scoutfs_work_list *wlist = container_of(work, struct scoutfs_work_list, work);
	struct message_recv *mrecv;
	struct message_recv *mrecv__;
	LIST_HEAD(list);

	spin_lock(&wlist->lock);
	list_splice_init(&wlist->list, &list);
	spin_unlock(&wlist->lock);

	list_for_each_entry_safe(mrecv, mrecv__, &list, ordered_head) {
		list_del_init(&mrecv->ordered_head);
		scoutfs_net_proc_worker(&mrecv->proc_work);
	}
}

/*
 * Some messages require in-order processing.  But the scope of the
 * ordering isn't global.  In the case of lock messages, it's per lock.
 * So for these messages we hash them to a number of ordered workers who
 * walk a list and call the usual work function in order.  This replaced
 * first the proc work detecting OOO and re-ordering, and then only
 * calling proc from the one recv work context.
 */
static void queue_ordered_proc(struct scoutfs_net_connection *conn, struct message_recv *mrecv)
{
	struct scoutfs_work_list *wlist;
	struct scoutfs_net_lock *nl;
	u32 h;

	if (WARN_ON_ONCE(mrecv->nh.cmd != SCOUTFS_NET_CMD_LOCK ||
			 le16_to_cpu(mrecv->nh.data_len) != sizeof(struct scoutfs_net_lock)))
		return scoutfs_net_proc_worker(&mrecv->proc_work);

	nl = (void *)mrecv->nh.data;
	h = jhash(&nl->key, sizeof(struct scoutfs_key), 0x6fdd3cd5);
	wlist = &conn->ordered_proc_wlists[h % conn->ordered_proc_nr];

	spin_lock(&wlist->lock);
	list_add_tail(&mrecv->ordered_head, &wlist->list);
	spin_unlock(&wlist->lock);
	queue_work(conn->workq, &wlist->work);
}

/*
 * Free live responses up to and including the seq by marking them dead
 * and moving them to the send queue to be freed.
 */
static int move_acked_responses(struct scoutfs_net_connection *conn,
				struct list_head *list, u64 seq)
static bool move_acked_responses(struct scoutfs_net_connection *conn,
				 struct list_head *list, u64 seq)
{
	struct message_send *msend;
	struct message_send *tmp;
	int ret = 0;
	bool moved = false;

	assert_spin_locked(&conn->lock);
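The bucketing itself is just a stable hash of the lock key, so every message for one lock lands on the same ordered worker while different locks spread across workers. A condensed sketch of the idea (the worker count shown as a hypothetical constant):

	u32 h = jhash(&nl->key, sizeof(struct scoutfs_key), 0x6fdd3cd5);
	struct scoutfs_work_list *wlist = &wlists[h % NR_ORDERED_WORKERS];
	/* list_add_tail() under wlist->lock preserves arrival order per key */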
@@ -519,20 +567,20 @@ static int move_acked_responses(struct scoutfs_net_connection *conn,

		msend->dead = 1;
		list_move(&msend->head, &conn->send_queue);
		ret = 1;
		moved = true;
	}

	return ret;
	return moved;
}

/* acks are processed inline in the recv worker */
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
{
	int moved;
	bool moved;

	spin_lock(&conn->lock);

	moved = move_acked_responses(conn, &conn->send_queue, seq) +
	moved = move_acked_responses(conn, &conn->send_queue, seq) |
		move_acked_responses(conn, &conn->resend_queue, seq);

	spin_unlock(&conn->lock);
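Note the operator change from + to | when combining the two walks. A bitwise or still evaluates both calls, where a logical || would short-circuit and skip the resend_queue walk once the send_queue walk had moved something. The integer sum worked too, but arithmetic on what is now a bool reads poorly; the or states the intent:

	/* both queues are always walked; moved is true if either moved entries */
	moved = move_acked_responses(conn, &conn->send_queue, seq) |
		move_acked_responses(conn, &conn->resend_queue, seq);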
@@ -541,33 +589,17 @@ static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
		queue_work(conn->workq, &conn->send_work);
}

static int recvmsg_full(struct socket *sock, void *buf, unsigned len)
static int k_recvmsg(struct socket *sock, void *buf, unsigned len)
{
	struct msghdr msg;
	struct kvec kv;
	int ret;
	struct kvec kv = {
		.iov_base = buf,
		.iov_len = len,
	};
	struct msghdr msg = {
		.msg_flags = MSG_NOSIGNAL,
	};

	while (len) {
		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_NOSIGNAL;
		kv.iov_base = buf;
		kv.iov_len = len;

#ifndef KC_MSGHDR_STRUCT_IOV_ITER
		msg.msg_iov = (struct iovec *)&kv;
		msg.msg_iovlen = 1;
#else
		iov_iter_init(&msg.msg_iter, READ, (struct iovec *)&kv, len, 1);
#endif
		ret = kernel_recvmsg(sock, &msg, &kv, 1, len, msg.msg_flags);
		if (ret <= 0)
			return -ECONNABORTED;

		len -= ret;
		buf += ret;
	}

	return 0;
	return kernel_recvmsg(sock, &msg, &kv, 1, len, msg.msg_flags);
}

static bool invalid_message(struct scoutfs_net_connection *conn,
@@ -604,6 +636,72 @@ static bool invalid_message(struct scoutfs_net_connection *conn,
	return false;
}

static int recv_one_message(struct super_block *sb, struct net_info *ninf,
			    struct scoutfs_net_connection *conn, struct scoutfs_net_header *nh,
			    unsigned int data_len)
{
	struct message_recv *mrecv;
	int ret;

	scoutfs_inc_counter(sb, net_recv_messages);
	scoutfs_add_counter(sb, net_recv_bytes, nh_bytes(data_len));
	trace_scoutfs_net_recv_message(sb, &conn->sockname, &conn->peername, nh);

	/* caller's invalid message checked data len */
	mrecv = kmalloc(offsetof(struct message_recv, nh.data[data_len]), GFP_NOFS);
	if (!mrecv) {
		ret = -ENOMEM;
		goto out;
	}

	mrecv->conn = conn;
	INIT_WORK(&mrecv->proc_work, scoutfs_net_proc_worker);
	INIT_LIST_HEAD(&mrecv->ordered_head);
	mrecv->nh = *nh;
	if (data_len)
		memcpy(mrecv->nh.data, (nh + 1), data_len);

	if (nh->cmd == SCOUTFS_NET_CMD_GREETING) {
		/* greetings are out of band, no seq mechanics */
		set_conn_fl(conn, saw_greeting);

	} else if (le64_to_cpu(nh->seq) <=
		   atomic64_read(&conn->recv_seq)) {
		/* drop any resent duplicated messages */
		scoutfs_inc_counter(sb, net_recv_dropped_duplicate);
		kfree(mrecv);
		ret = 0;
		goto out;

	} else {
		/* record that we've received sender's seq */
		atomic64_set(&conn->recv_seq, le64_to_cpu(nh->seq));
		/* and free our responses that sender has received */
		free_acked_responses(conn, le64_to_cpu(nh->recv_seq));
	}

	scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);

	/*
	 * Initial received greetings are processed inline
	 * before any other incoming messages.
	 *
	 * Incoming requests or responses to the lock client
	 * can't handle re-ordering, so they're queued to
	 * ordered receive processing work.
	 */
	if (nh->cmd == SCOUTFS_NET_CMD_GREETING)
		scoutfs_net_proc_worker(&mrecv->proc_work);
	else if (nh->cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn)
		queue_ordered_proc(conn, mrecv);
	else
		queue_work(conn->workq, &mrecv->proc_work);
	ret = 0;

out:
	return ret;
}

/*
 * Always block receiving from the socket.  Errors trigger shutting down
 * the connection.
@@ -614,86 +712,72 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
	struct super_block *sb = conn->sb;
	struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
	struct socket *sock = conn->sock;
	struct scoutfs_net_header nh;
	struct message_recv *mrecv;
	struct scoutfs_net_header *nh;
	struct page *page = NULL;
	unsigned int data_len;
	int hdr_off;
	int rx_off;
	int size;
	int ret;

	trace_scoutfs_net_recv_work_enter(sb, 0, 0);

	page = alloc_page(GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	hdr_off = 0;
	rx_off = 0;

	for (;;) {
		/* receive the header */
		ret = recvmsg_full(sock, &nh, sizeof(nh));
		if (ret)
			break;

		/* receiving an invalid message breaks the connection */
		if (invalid_message(conn, &nh)) {
			scoutfs_inc_counter(sb, net_recv_invalid_message);
			ret = -EBADMSG;
			break;
		ret = k_recvmsg(sock, page_address(page) + rx_off, PAGE_SIZE - rx_off);
		if (ret <= 0) {
			ret = -ECONNABORTED;
			goto out;
		}

		data_len = le16_to_cpu(nh.data_len);
		rx_off += ret;

		scoutfs_inc_counter(sb, net_recv_messages);
		scoutfs_add_counter(sb, net_recv_bytes, nh_bytes(data_len));
		trace_scoutfs_net_recv_message(sb, &conn->sockname,
					       &conn->peername, &nh);
		for (;;) {
			size = rx_off - hdr_off;
			if (size < sizeof(struct scoutfs_net_header))
				break;

		/* invalid message checked data len */
		mrecv = kmalloc(offsetof(struct message_recv,
					 nh.data[data_len]), GFP_NOFS);
		if (!mrecv) {
			ret = -ENOMEM;
			break;
			nh = page_address(page) + hdr_off;

			/* receiving an invalid message breaks the connection */
			if (invalid_message(conn, nh)) {
				scoutfs_inc_counter(sb, net_recv_invalid_message);
				ret = -EBADMSG;
				break;
			}

			data_len = le16_to_cpu(nh->data_len);
			if (sizeof(struct scoutfs_net_header) + data_len > size)
				break;

			ret = recv_one_message(sb, ninf, conn, nh, data_len);
			if (ret < 0)
				goto out;

			hdr_off += sizeof(struct scoutfs_net_header) + data_len;
		}

		mrecv->conn = conn;
		INIT_WORK(&mrecv->proc_work, scoutfs_net_proc_worker);
		mrecv->nh = nh;

		/* receive the data payload */
		ret = recvmsg_full(sock, mrecv->nh.data, data_len);
		if (ret) {
			kfree(mrecv);
			break;
		if ((PAGE_SIZE - rx_off) <
		    (sizeof(struct scoutfs_net_header) + SCOUTFS_NET_MAX_DATA_LEN)) {
			if (size)
				memmove(page_address(page), page_address(page) + hdr_off, size);
			hdr_off = 0;
			rx_off = size;
		}

		if (nh.cmd == SCOUTFS_NET_CMD_GREETING) {
			/* greetings are out of band, no seq mechanics */
			set_conn_fl(conn, saw_greeting);

		} else if (le64_to_cpu(nh.seq) <=
			   atomic64_read(&conn->recv_seq)) {
			/* drop any resent duplicated messages */
			scoutfs_inc_counter(sb, net_recv_dropped_duplicate);
			kfree(mrecv);
			continue;

		} else {
			/* record that we've received sender's seq */
			atomic64_set(&conn->recv_seq, le64_to_cpu(nh.seq));
			/* and free our responses that sender has received */
			free_acked_responses(conn, le64_to_cpu(nh.recv_seq));
		}

		scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);

		/*
		 * Initial received greetings are processed
		 * synchronously before any other incoming messages.
		 *
		 * Incoming requests or responses to the lock client are
		 * called synchronously to avoid reordering.
		 */
		if (nh.cmd == SCOUTFS_NET_CMD_GREETING ||
		    (nh.cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn))
			scoutfs_net_proc_worker(&mrecv->proc_work);
		else
			queue_work(conn->workq, &mrecv->proc_work);
	}

out:
	__free_page(page);

	if (ret)
		scoutfs_inc_counter(sb, net_recv_error);
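The rewritten receive path batches socket reads into a page and then frames messages out of it. A reduced sketch of that framing loop (the same logic as above, condensed for reading):

	size = rx_off - hdr_off;			/* buffered bytes */
	while (size >= sizeof(struct scoutfs_net_header)) {
		nh = page_address(page) + hdr_off;
		if (sizeof(struct scoutfs_net_header) +
		    le16_to_cpu(nh->data_len) > size)
			break;				/* partial message, read more */
		/* ... consume one complete message ... */
		hdr_off += sizeof(struct scoutfs_net_header) + le16_to_cpu(nh->data_len);
		size = rx_off - hdr_off;
	}
	/* any partial tail is compacted to the front before the next recv */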
@@ -703,33 +787,41 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
	trace_scoutfs_net_recv_work_exit(sb, 0, ret);
}

static int sendmsg_full(struct socket *sock, void *buf, unsigned len)
/*
 * This consumes the kvec.
 */
static int k_sendmsg_full(struct socket *sock, struct kvec *kv, unsigned long nr_segs, size_t count)
{
	struct msghdr msg;
	struct kvec kv;
	int ret;
	int ret = 0;

	while (len) {
		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_NOSIGNAL;
		kv.iov_base = buf;
		kv.iov_len = len;
	while (count > 0) {
		struct msghdr msg = {
			.msg_flags = MSG_NOSIGNAL,
		};

#ifndef KC_MSGHDR_STRUCT_IOV_ITER
		msg.msg_iov = (struct iovec *)&kv;
		msg.msg_iovlen = 1;
#else
		iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)&kv, len, 1);
#endif
		ret = kernel_sendmsg(sock, &msg, &kv, 1, len);
		if (ret <= 0)
			return -ECONNABORTED;
		ret = kernel_sendmsg(sock, &msg, kv, nr_segs, count);
		if (ret <= 0) {
			ret = -ECONNABORTED;
			break;
		}

		len -= ret;
		buf += ret;
		count -= ret;
		if (count) {
			while (nr_segs > 0 && ret >= kv->iov_len) {
				ret -= kv->iov_len;
				kv++;
				nr_segs--;
			}
			if (nr_segs > 0 && ret > 0) {
				kv->iov_base += ret;
				kv->iov_len -= ret;
			}
			BUG_ON(nr_segs == 0);
		}
		ret = 0;
	}

	return 0;

	return ret;
}

static void free_msend(struct net_info *ninf, struct message_send *msend)
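The helper consumes the kvec because a short send only covers part of the array; before retrying it has to skip fully-sent segments and trim the partially-sent one. Isolated, the advance step looks like this (sent standing in for the kernel_sendmsg() return):

	while (nr_segs > 0 && sent >= kv->iov_len) {
		sent -= kv->iov_len;	/* this segment went out completely */
		kv++;
		nr_segs--;
	}
	if (nr_segs > 0 && sent > 0) {
		kv->iov_base += sent;	/* resume mid-segment */
		kv->iov_len -= sent;
	}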
@@ -760,54 +852,73 @@ static void scoutfs_net_send_worker(struct work_struct *work)
	struct super_block *sb = conn->sb;
	struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
	struct message_send *msend;
	int ret = 0;
	struct message_send *_msend_;
	struct kvec kv[16];
	unsigned long nr_segs;
	size_t count;
	int len;
	int ret;

	trace_scoutfs_net_send_work_enter(sb, 0, 0);

	spin_lock(&conn->lock);

	while ((msend = list_first_entry_or_null(&conn->send_queue,
						 struct message_send, head))) {

		if (msend->dead) {
			free_msend(ninf, msend);
			continue;
		}

		if ((msend->nh.cmd == SCOUTFS_NET_CMD_FAREWELL) &&
		    nh_is_response(&msend->nh)) {
			set_conn_fl(conn, saw_farewell);
		}

		msend->nh.recv_seq =
			cpu_to_le64(atomic64_read(&conn->recv_seq));

		spin_unlock(&conn->lock);

		len = nh_bytes(le16_to_cpu(msend->nh.data_len));

		scoutfs_inc_counter(sb, net_send_messages);
		scoutfs_add_counter(sb, net_send_bytes, len);
		trace_scoutfs_net_send_message(sb, &conn->sockname,
					       &conn->peername, &msend->nh);

		ret = sendmsg_full(conn->sock, &msend->nh, len);
	for (;;) {
		nr_segs = 0;
		count = 0;

		spin_lock(&conn->lock);
		list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
			if (msend->dead) {
				free_msend(ninf, msend);
				continue;
			}

			msend->nh.recv_seq = 0;
			len = nh_bytes(le16_to_cpu(msend->nh.data_len));

		if (ret)
			break;
			if ((msend->nh.cmd == SCOUTFS_NET_CMD_FAREWELL) &&
			    nh_is_response(&msend->nh)) {
				set_conn_fl(conn, saw_farewell);
			}

		/* resend if it wasn't freed while we sent */
		if (!msend->dead)
			list_move_tail(&msend->head, &conn->resend_queue);
			msend->nh.recv_seq = cpu_to_le64(atomic64_read(&conn->recv_seq));

			scoutfs_inc_counter(sb, net_send_messages);
			scoutfs_add_counter(sb, net_send_bytes, len);
			trace_scoutfs_net_send_message(sb, &conn->sockname,
						       &conn->peername, &msend->nh);

			count += len;
			kv[nr_segs].iov_base = &msend->nh;
			kv[nr_segs].iov_len = len;
			if (++nr_segs == ARRAY_SIZE(kv))
				break;

		}
		spin_unlock(&conn->lock);

		if (nr_segs == 0) {
			ret = 0;
			goto out;
		}

		ret = k_sendmsg_full(conn->sock, kv, nr_segs, count);
		if (ret < 0)
			goto out;

		spin_lock(&conn->lock);
		list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
			msend->nh.recv_seq = 0;

			/* resend if it wasn't freed while we sent */
			if (!msend->dead)
				list_move_tail(&msend->head, &conn->resend_queue);

			if (--nr_segs == 0)
				break;
		}
		spin_unlock(&conn->lock);
	}

	spin_unlock(&conn->lock);

out:
	if (ret) {
		scoutfs_inc_counter(sb, net_send_error);
		shutdown_conn(conn);
@@ -862,6 +973,7 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
	destroy_workqueue(conn->workq);
	scoutfs_tseq_del(&ninf->conn_tseq_tree, &conn->tseq_entry);
	kfree(conn->info);
	kfree(conn->ordered_proc_wlists);
	trace_scoutfs_conn_destroy_free(conn);
	kfree(conn);
@@ -887,7 +999,7 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
 * The TCP_KEEP* and TCP_USER_TIMEOUT option interaction is subtle.
 * TCP_USER_TIMEOUT only applies if there is unacked written data in the
 * send queue.  It doesn't work if the connection is idle.  Adding
 * keepalice probes with user_timeout set changes how the keepalive
 * keepalive probes with user_timeout set changes how the keepalive
 * timeout is calculated.  CNT no longer matters.  Each time
 * additional probes (not the first) are sent the user timeout is
 * checked against the last time data was received.  If none of the
@@ -899,58 +1011,50 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
 * elapses during the probe timer processing after the unsuccessful
 * probes.
 */
#define UNRESPONSIVE_TIMEOUT_SECS 10
#define UNRESPONSIVE_PROBES 3
static int sock_opts_and_names(struct scoutfs_net_connection *conn,
static int sock_opts_and_names(struct super_block *sb,
			       struct scoutfs_net_connection *conn,
			       struct socket *sock)
{
	struct timeval tv;
	struct scoutfs_mount_options opts;
	int optval;
	int ret;

	scoutfs_options_read(sb, &opts);

	/* we use a keepalive timeout instead of send timeout */
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
				(char *)&tv, sizeof(tv));
	ret = kc_sock_set_sndtimeo(sock, 0);
	if (ret)
		goto out;

	/* not checked when user_timeout != 0, but for clarity */
	optval = UNRESPONSIVE_PROBES;
	ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
				(char *)&optval, sizeof(optval));
	ret = kc_sock_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
				 &optval, sizeof(optval));
	if (ret)
		goto out;

	BUILD_BUG_ON(UNRESPONSIVE_PROBES >= UNRESPONSIVE_TIMEOUT_SECS);
	optval = UNRESPONSIVE_TIMEOUT_SECS - (UNRESPONSIVE_PROBES);
	ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
				(char *)&optval, sizeof(optval));
	optval = (opts.tcp_keepalive_timeout_ms / MSEC_PER_SEC) - UNRESPONSIVE_PROBES;
	ret = kc_tcp_sock_set_keepidle(sock, optval);
	if (ret)
		goto out;

	optval = 1;
	ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
				(char *)&optval, sizeof(optval));
	ret = kc_tcp_sock_set_keepintvl(sock, optval);
	if (ret)
		goto out;

	optval = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC;
	ret = kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
				(char *)&optval, sizeof(optval));
	optval = opts.tcp_keepalive_timeout_ms;
	ret = kc_tcp_sock_set_user_timeout(sock, optval);
	if (ret)
		goto out;

	optval = 1;
	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&optval, sizeof(optval));
	ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				 &optval, sizeof(optval));
	if (ret)
		goto out;

	optval = 1;
	ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
				(char *)&optval, sizeof(optval));
	ret = kc_tcp_sock_set_nodelay(sock);
	if (ret)
		goto out;
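Plugging in the defaults makes the timing concrete. With tcp_keepalive_timeout_ms at its default of 60000 and UNRESPONSIVE_PROBES of 3, the settings above work out to roughly the following (a worked example, not additional tunables):

	keepidle     = 60000 / MSEC_PER_SEC - 3;	/* 57s idle before probing */
	keepintvl    = 1;				/* then one probe per second */
	user_timeout = 60000;				/* unresponsive peer dropped at ~60s */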
@@ -1001,13 +1105,19 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
					   conn->notify_down,
					   conn->info_size,
					   conn->req_funcs, "accepted");
		/*
		 * scoutfs_net_alloc_conn() can fail due to ENOMEM.  If this
		 * is the only thing that does so, there's no harm in trying
		 * to see if kernel_accept() can get enough memory to try accepting
		 * a new connection again.  If that then fails with ENOMEM, it'll
		 * shut down the conn anyway.  So just retry here.
		 */
		if (!acc_conn) {
			sock_release(acc_sock);
			ret = -ENOMEM;
			continue;
		}

		ret = sock_opts_and_names(acc_conn, acc_sock);
		ret = sock_opts_and_names(sb, acc_conn, acc_sock);
		if (ret) {
			sock_release(acc_sock);
			destroy_conn(acc_conn);
@@ -1049,7 +1159,6 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
	DEFINE_CONN_FROM_WORK(conn, work, connect_work);
	struct super_block *sb = conn->sb;
	struct socket *sock;
	struct timeval tv;
	int ret;

	trace_scoutfs_net_connect_work_enter(sb, 0, 0);
@@ -1060,11 +1169,8 @@ static void scoutfs_net_connect_worker(struct work_struct *work)

	sock->sk->sk_allocation = GFP_NOFS;

	/* caller specified connect timeout */
	tv.tv_sec = conn->connect_timeout_ms / MSEC_PER_SEC;
	tv.tv_usec = (conn->connect_timeout_ms % MSEC_PER_SEC) * USEC_PER_MSEC;
	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
				(char *)&tv, sizeof(tv));
	/* caller specified connect timeout, defaults to 1 sec */
	ret = kc_sock_set_sndtimeo(sock, conn->connect_timeout_ms / MSEC_PER_SEC);
	if (ret) {
		sock_release(sock);
		goto out;
@@ -1082,7 +1188,7 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
	if (ret)
		goto out;

	ret = sock_opts_and_names(conn, sock);
	ret = sock_opts_and_names(sb, conn, sock);
	if (ret)
		goto out;
@@ -1343,25 +1449,30 @@ scoutfs_net_alloc_conn(struct super_block *sb,
{
	struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
	struct scoutfs_net_connection *conn;
	unsigned int nr;
	unsigned int i;

	nr = min_t(unsigned int, num_possible_cpus(),
		   PAGE_SIZE / sizeof(struct scoutfs_work_list));

	conn = kzalloc(sizeof(struct scoutfs_net_connection), GFP_NOFS);
	if (!conn)
		return NULL;

	if (info_size) {
		conn->info = kzalloc(info_size, GFP_NOFS);
		if (!conn->info) {
			kfree(conn);
			return NULL;
		}
	if (conn) {
		if (info_size)
			conn->info = kzalloc(info_size, GFP_NOFS);
		conn->ordered_proc_wlists = kmalloc_array(nr, sizeof(struct scoutfs_work_list),
							  GFP_NOFS);
		conn->workq = alloc_workqueue("scoutfs_net_%s",
					      WQ_UNBOUND | WQ_NON_REENTRANT, 0,
					      name_suffix);
	}

	conn->workq = alloc_workqueue("scoutfs_net_%s",
				      WQ_UNBOUND | WQ_NON_REENTRANT, 0,
				      name_suffix);
	if (!conn->workq) {
		kfree(conn->info);
		kfree(conn);
	if (!conn || (info_size && !conn->info) || !conn->workq || !conn->ordered_proc_wlists) {
		if (conn) {
			kfree(conn->info);
			kfree(conn->ordered_proc_wlists);
			if (conn->workq)
				destroy_workqueue(conn->workq);
			kfree(conn);
		}
		return NULL;
	}
@@ -1391,6 +1502,13 @@ scoutfs_net_alloc_conn(struct super_block *sb,
	INIT_DELAYED_WORK(&conn->reconn_free_dwork,
			  scoutfs_net_reconn_free_worker);

	conn->ordered_proc_nr = nr;
	for (i = 0; i < nr; i++) {
		INIT_WORK(&conn->ordered_proc_wlists[i].work, scoutfs_net_ordered_proc_worker);
		spin_lock_init(&conn->ordered_proc_wlists[i].lock);
		INIT_LIST_HEAD(&conn->ordered_proc_wlists[i].list);
	}

	scoutfs_tseq_add(&ninf->conn_tseq_tree, &conn->tseq_entry);
	trace_scoutfs_conn_alloc(conn);
@@ -1462,8 +1580,8 @@ int scoutfs_net_bind(struct super_block *sb,
	sock->sk->sk_allocation = GFP_NOFS;

	optval = 1;
	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				(char *)&optval, sizeof(optval));
	ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				 &optval, sizeof(optval));
	if (ret)
		goto out;
@@ -1,10 +1,18 @@
#ifndef _SCOUTFS_NET_H_
#define _SCOUTFS_NET_H_

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/in.h>
#include "endian_swap.h"
#include "tseq.h"

struct scoutfs_work_list {
	struct work_struct work;
	spinlock_t lock;
	struct list_head list;
};

struct scoutfs_net_connection;

/* These are called in their own blocking context */
@@ -61,6 +69,8 @@ struct scoutfs_net_connection {
	struct list_head resend_queue;

	atomic64_t recv_seq;
	unsigned int ordered_proc_nr;
	struct scoutfs_work_list *ordered_proc_wlists;

	struct workqueue_struct *workq;
	struct work_struct listen_work;
@@ -592,7 +592,7 @@ static int handle_request(struct super_block *sb, struct omap_request *req)
	ret = 0;
out:
	free_rids(&priv_rids);
	if (ret < 0) {
	if ((ret < 0) && (req != NULL)) {
		ret = scoutfs_server_send_omap_response(sb, req->client_rid, req->client_id,
							NULL, ret);
		free_req(req);
@@ -39,6 +39,7 @@ enum {
	Opt_orphan_scan_delay_ms,
	Opt_quorum_heartbeat_timeout_ms,
	Opt_quorum_slot_nr,
	Opt_tcp_keepalive_timeout_ms,
	Opt_err,
};

@@ -52,6 +53,7 @@ static const match_table_t tokens = {
	{Opt_orphan_scan_delay_ms, "orphan_scan_delay_ms=%s"},
	{Opt_quorum_heartbeat_timeout_ms, "quorum_heartbeat_timeout_ms=%s"},
	{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
	{Opt_tcp_keepalive_timeout_ms, "tcp_keepalive_timeout_ms=%s"},
	{Opt_err, NULL}
};

@@ -126,6 +128,8 @@ static void free_options(struct scoutfs_mount_options *opts)
#define MIN_DATA_PREALLOC_BLOCKS 1ULL
#define MAX_DATA_PREALLOC_BLOCKS ((unsigned long long)SCOUTFS_BLOCK_SM_MAX)

#define DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS (60 * MSEC_PER_SEC)

static void init_default_options(struct scoutfs_mount_options *opts)
{
	memset(opts, 0, sizeof(*opts));

@@ -136,6 +140,7 @@ static void init_default_options(struct scoutfs_mount_options *opts)
	opts->orphan_scan_delay_ms = -1;
	opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
	opts->quorum_slot_nr = -1;
	opts->tcp_keepalive_timeout_ms = DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS;
}

static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)

@@ -168,6 +173,21 @@ static int verify_quorum_heartbeat_timeout_ms(struct super_block *sb, int ret, u
	return 0;
}

static int verify_tcp_keepalive_timeout_ms(struct super_block *sb, int ret, int val)
{
	if (ret < 0) {
		scoutfs_err(sb, "failed to parse tcp_keepalive_timeout_ms value");
		return -EINVAL;
	}
	if (val <= (UNRESPONSIVE_PROBES * MSEC_PER_SEC)) {
		scoutfs_err(sb, "invalid tcp_keepalive_timeout_ms value %d, must be larger than %lu",
			    val, (UNRESPONSIVE_PROBES * MSEC_PER_SEC));
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse the option string into our options struct.  This can allocate
 * memory in the struct.  The caller is responsible for always calling
@@ -218,6 +238,14 @@ static int parse_options(struct super_block *sb, char *options, struct scoutfs_m
|
||||
opts->data_prealloc_contig_only = nr;
|
||||
break;
|
||||
|
||||
case Opt_tcp_keepalive_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_tcp_keepalive_timeout_ms(sb, ret, nr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->tcp_keepalive_timeout_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_log_merge_wait_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
|
||||
@@ -371,6 +399,7 @@ int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
|
||||
seq_printf(seq, ",orphan_scan_delay_ms=%u", opts.orphan_scan_delay_ms);
|
||||
if (opts.quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts.quorum_slot_nr);
|
||||
seq_printf(seq, ",tcp_keepalive_timeout_ms=%d", opts.tcp_keepalive_timeout_ms);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -13,8 +13,11 @@ struct scoutfs_mount_options {
|
||||
unsigned int orphan_scan_delay_ms;
|
||||
int quorum_slot_nr;
|
||||
u64 quorum_heartbeat_timeout_ms;
|
||||
int tcp_keepalive_timeout_ms;
|
||||
};
|
||||
|
||||
#define UNRESPONSIVE_PROBES 3
|
||||
|
||||
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts);
|
||||
int scoutfs_options_show(struct seq_file *seq, struct dentry *root);
|
||||
|
||||
|
||||
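UNRESPONSIVE_PROBES caps how many keepalive probes may go unanswered, and the verifier above requires the timeout to exceed one second per probe, so an even split always yields at least a one-second probe interval. The code that actually applies the option to a socket is not in these hunks; a plausible sketch, assuming the v5.7+ tcp_sock_set_* helpers and an even split of the timeout across the probes:

	/* hypothetical consumer of tcp_keepalive_timeout_ms, not in this diff */
	static void set_conn_keepalive(struct socket *sock, int timeout_ms)
	{
		int secs = timeout_ms / UNRESPONSIVE_PROBES / MSEC_PER_SEC;

		sock_set_keepalive(sock->sk);
		tcp_sock_set_keepidle(sock->sk, secs);
		tcp_sock_set_keepintvl(sock->sk, secs);
		tcp_sock_set_keepcnt(sock->sk, UNRESPONSIVE_PROBES);
	}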
@@ -243,10 +243,6 @@ static int send_msg_members(struct super_block *sb, int type, u64 term, int only
};
struct sockaddr_in sin;
struct msghdr mh = {
#ifndef KC_MSGHDR_STRUCT_IOV_ITER
.msg_iov = (struct iovec *)&kv,
.msg_iovlen = 1,
#endif
.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
.msg_name = &sin,
.msg_namelen = sizeof(sin),
@@ -268,9 +264,7 @@ static int send_msg_members(struct super_block *sb, int type, u64 term, int only

scoutfs_quorum_slot_sin(&qinf->qconf, i, &sin);
now = ktime_get();
#ifdef KC_MSGHDR_STRUCT_IOV_ITER
iov_iter_init(&mh.msg_iter, WRITE, (struct iovec *)&kv, sizeof(qmes), 1);
#endif

ret = kernel_sendmsg(qinf->sock, &mh, &kv, 1, kv.iov_len);
if (ret != kv.iov_len)
failed++;
@@ -303,7 +297,6 @@ static int recv_msg(struct super_block *sb, struct quorum_host_msg *msg,
DECLARE_QUORUM_INFO(sb, qinf);
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_quorum_message qmes;
struct timeval tv;
ktime_t rel_to;
ktime_t now;
int ret;
@@ -313,10 +306,6 @@ static int recv_msg(struct super_block *sb, struct quorum_host_msg *msg,
.iov_len = sizeof(struct scoutfs_quorum_message),
};
struct msghdr mh = {
#ifndef KC_MSGHDR_STRUCT_IOV_ITER
.msg_iov = (struct iovec *)&kv,
.msg_iovlen = 1,
#endif
.msg_flags = MSG_NOSIGNAL,
};

@@ -328,19 +317,12 @@ static int recv_msg(struct super_block *sb, struct quorum_host_msg *msg,
else
rel_to = ns_to_ktime(0);

tv = ktime_to_timeval(rel_to);
if (tv.tv_sec == 0 && tv.tv_usec == 0) {
if (ktime_compare(rel_to, ns_to_ktime(NSEC_PER_USEC)) <= 0) {
mh.msg_flags |= MSG_DONTWAIT;
} else {
ret = kernel_setsockopt(qinf->sock, SOL_SOCKET, SO_RCVTIMEO,
(char *)&tv, sizeof(tv));
if (ret < 0)
return ret;
ret = kc_tcp_sock_set_rcvtimeo(qinf->sock, rel_to);
}

#ifdef KC_MSGHDR_STRUCT_IOV_ITER
iov_iter_init(&mh.msg_iter, READ, (struct iovec *)&kv, sizeof(struct scoutfs_quorum_message), 1);
#endif
ret = kernel_recvmsg(qinf->sock, &mh, &kv, 1, kv.iov_len, mh.msg_flags);
if (ret < 0)
return ret;
@@ -486,7 +468,7 @@ static void set_quorum_block_event(struct super_block *sb, struct scoutfs_quorum
if (WARN_ON_ONCE(event < 0 || event >= SCOUTFS_QUORUM_EVENT_NR))
return;

getnstimeofday64(&ts);
ktime_get_ts64(&ts);
le64_add_cpu(&blk->write_nr, 1);

ev = &blk->events[event];
@@ -525,10 +507,10 @@ static int update_quorum_block(struct super_block *sb, int event, u64 term, bool
set_quorum_block_event(sb, &blk, event, term);
ret = write_quorum_block(sb, blkno, &blk);
if (ret < 0)
scoutfs_err(sb, "error %d reading quorum block %llu to update event %d term %llu",
scoutfs_err(sb, "error %d writing quorum block %llu after updating event %d term %llu",
ret, blkno, event, term);
} else {
scoutfs_err(sb, "error %d writing quorum block %llu after updating event %d term %llu",
scoutfs_err(sb, "error %d reading quorum block %llu to update event %d term %llu",
ret, blkno, event, term);
}

@@ -827,6 +809,7 @@ static void scoutfs_quorum_worker(struct work_struct *work)

/* followers and candidates start new election on timeout */
if (qst.role != LEADER &&
msg.type == SCOUTFS_QUORUM_MSG_INVALID &&
ktime_after(ktime_get(), qst.timeout)) {
/* .. but only if their server has stopped */
if (!scoutfs_server_is_down(sb)) {
@@ -987,7 +970,10 @@ static void scoutfs_quorum_worker(struct work_struct *work)
}

/* record that this slot no longer has an active quorum */
update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_END, qst.term, true);
err = update_quorum_block(sb, SCOUTFS_QUORUM_EVENT_END, qst.term, true);
if (err < 0 && ret == 0)
ret = err;

out:
if (ret < 0) {
scoutfs_err(sb, "quorum service saw error %d, shutting down. This mount is no longer participating in quorum. It should be remounted to restore service.",
@@ -1076,7 +1062,7 @@ static char *role_str(int role)
[LEADER] = "leader",
};

if (role < 0 || role > ARRAY_SIZE(roles) || !roles[role])
if (role < 0 || role >= ARRAY_SIZE(roles) || !roles[role])
return "invalid";

return roles[role];
@@ -1325,8 +1311,8 @@ int scoutfs_quorum_setup(struct super_block *sb)
qinf = kzalloc(sizeof(struct quorum_info), GFP_KERNEL);
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_KERNEL);
if (qinf)
qinf->hb_delay = __vmalloc(HB_DELAY_NR * sizeof(struct count_recent),
GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
qinf->hb_delay = kc__vmalloc(HB_DELAY_NR * sizeof(struct count_recent),
GFP_KERNEL | __GFP_ZERO);
if (!qinf || !super || !qinf->hb_delay) {
if (qinf)
vfree(qinf->hb_delay);
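The kc__vmalloc() wrapper above papers over the v5.8 kernel change that removed the pgprot argument from __vmalloc(). One plausible shape for the wrapper; the KC_VMALLOC_HAS_PGPROT guard name is hypothetical, standing in for whatever the kernelcompat headers actually test:

	/* sketch of a compat wrapper; the guard macro name is hypothetical */
	#ifdef KC_VMALLOC_HAS_PGPROT
	#define kc__vmalloc(size, gfp)	__vmalloc((size), (gfp), PAGE_KERNEL)
	#else
	#define kc__vmalloc(size, gfp)	__vmalloc((size), (gfp))
	#endif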
kmod/src/quota.c (new file, 1266 lines; file diff suppressed because it is too large)
kmod/src/quota.h (new file, 48 lines)
@@ -0,0 +1,48 @@
#ifndef _SCOUTFS_QUOTA_H_
#define _SCOUTFS_QUOTA_H_

#include "ioctl.h"

/*
* Each rule's name can be in the ruleset's rbtree associated with the
* source attr that it selects. This lets checks only test rules that
* the inputs could match. The 'i' field indicates which name is in the
* tree so we can find the containing rule.
*
* This is mostly private to quota.c but we expose it for tracing.
*/
struct squota_rule {
u64 limit;
u8 prio;
u8 op;
u8 rule_flags;
struct squota_rule_name {
struct rb_node node;
u64 val;
u8 source;
u8 flags;
u8 i;
} names[3];
};

/* private to quota.c, only here for tracing */
struct squota_input {
u64 attrs[SQ_NS__NR_SELECT];
u8 op;
};

int scoutfs_quota_check_inode(struct super_block *sb, struct inode *dir);
int scoutfs_quota_check_data(struct super_block *sb, struct inode *inode);

int scoutfs_quota_get_rules(struct super_block *sb, u64 *iterator,
struct scoutfs_ioctl_quota_rule *irules, int nr);
int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
struct scoutfs_ioctl_quota_rule *irule);

void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_quota_invalidate(struct super_block *sb);

int scoutfs_quota_setup(struct super_block *sb);
void scoutfs_quota_destroy(struct super_block *sb);

#endif
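The 'i' field lets code that finds a name node in the rbtree walk back to its containing rule. A hypothetical helper showing that recovery step under the layout declared above; quota.c's real lookup code is not part of this diff:

	/* hypothetical sketch: recover the rule from a name found in the rbtree */
	static struct squota_rule *rule_from_name(struct squota_rule_name *name)
	{
		/* names[] is an array member, so step back to names[0] first */
		return container_of(name - name->i, struct squota_rule, names[0]);
	}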
@@ -76,10 +76,10 @@ static struct recov_pending *lookup_pending(struct recov_info *recinf, u64 rid,
* We keep the pending list sorted by rid so that we can iterate over
* them. The list should be small and shouldn't be used often.
*/
static int cmp_pending_rid(void *priv, struct list_head *A, struct list_head *B)
static int cmp_pending_rid(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
{
struct recov_pending *a = list_entry(A, struct recov_pending, head);
struct recov_pending *b = list_entry(B, struct recov_pending, head);
KC_LIST_CMP_CONST struct recov_pending *a = list_entry(A, KC_LIST_CMP_CONST struct recov_pending, head);
KC_LIST_CMP_CONST struct recov_pending *b = list_entry(B, KC_LIST_CMP_CONST struct recov_pending, head);

return scoutfs_cmp_u64s(a->rid, b->rid);
}
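KC_LIST_CMP_CONST absorbs the v5.13 kernel change that made list_sort()'s comparison arguments const. A sketch of how such a compat macro is typically defined; the guard name here is hypothetical:

	/* sketch; the actual version test lives in the kernelcompat headers */
	#ifdef KC_LIST_SORT_TAKES_CONST
	#define KC_LIST_CMP_CONST const
	#else
	#define KC_LIST_CMP_CONST
	#endif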
@@ -24,7 +24,6 @@

#include <linux/tracepoint.h>
#include <linux/in.h>
#include <linux/unaligned/access_ok.h>

#include "key.h"
#include "format.h"
@@ -37,6 +36,10 @@
#include "net.h"
#include "data.h"
#include "ext.h"
#include "quota.h"

#include "trace/quota.h"
#include "trace/wkic.h"

struct lock_info;

@@ -283,6 +286,52 @@ TRACE_EVENT(scoutfs_data_alloc_block_enter,
STE_ENTRY_ARGS(ext))
);

TRACE_EVENT(scoutfs_data_page_mkwrite,
TP_PROTO(struct super_block *sb, __u64 ino, __u64 pos, __u32 ret),

TP_ARGS(sb, ino, pos, ret),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(__u64, pos)
__field(__u32, ret)
),

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->pos = pos;
__entry->ret = ret;
),

TP_printk(SCSBF" ino %llu pos %llu ret %u ",
SCSB_TRACE_ARGS, __entry->ino, __entry->pos, __entry->ret)
);

TRACE_EVENT(scoutfs_data_filemap_fault,
TP_PROTO(struct super_block *sb, __u64 ino, __u64 pos, __u32 ret),

TP_ARGS(sb, ino, pos, ret),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(__u64, pos)
__field(__u32, ret)
),

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->pos = pos;
__entry->ret = ret;
),

TP_printk(SCSBF" ino %llu pos %llu ret %u ",
SCSB_TRACE_ARGS, __entry->ino, __entry->pos, __entry->ret)
);

DECLARE_EVENT_CLASS(scoutfs_data_file_extent_class,
TP_PROTO(struct super_block *sb, __u64 ino, struct scoutfs_extent *ext),

@@ -774,13 +823,14 @@ DEFINE_EVENT(scoutfs_lock_info_class, scoutfs_lock_destroy,
);

TRACE_EVENT(scoutfs_xattr_set,
TP_PROTO(struct super_block *sb, size_t name_len, const void *value,
size_t size, int flags),
TP_PROTO(struct super_block *sb, __u64 ino, size_t name_len,
const void *value, size_t size, int flags),

TP_ARGS(sb, name_len, value, size, flags),
TP_ARGS(sb, ino, name_len, value, size, flags),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ino)
__field(size_t, name_len)
__field(const void *, value)
__field(size_t, size)
@@ -789,15 +839,16 @@ TRACE_EVENT(scoutfs_xattr_set,

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->name_len = name_len;
__entry->value = value;
__entry->size = size;
__entry->flags = flags;
),

TP_printk(SCSBF" name_len %zu value %p size %zu flags 0x%x",
SCSB_TRACE_ARGS, __entry->name_len, __entry->value,
__entry->size, __entry->flags)
TP_printk(SCSBF" ino %llu name_len %zu value %p size %zu flags 0x%x",
SCSB_TRACE_ARGS, __entry->ino, __entry->name_len,
__entry->value, __entry->size, __entry->flags)
);

TRACE_EVENT(scoutfs_advance_dirty_super,
@@ -1043,9 +1094,12 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
sk_trace_define(start)
sk_trace_define(end)
__field(u64, refresh_gen)
__field(u64, write_seq)
__field(u64, dirty_trans_seq)
__field(unsigned char, request_pending)
__field(unsigned char, invalidate_pending)
__field(int, mode)
__field(int, invalidating_mode)
__field(unsigned int, waiters_cw)
__field(unsigned int, waiters_pr)
__field(unsigned int, waiters_ex)
@@ -1058,9 +1112,12 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
sk_trace_assign(start, &lck->start);
sk_trace_assign(end, &lck->end);
__entry->refresh_gen = lck->refresh_gen;
__entry->write_seq = lck->write_seq;
__entry->dirty_trans_seq = lck->dirty_trans_seq;
__entry->request_pending = lck->request_pending;
__entry->invalidate_pending = lck->invalidate_pending;
__entry->mode = lck->mode;
__entry->invalidating_mode = lck->invalidating_mode;
__entry->waiters_pr = lck->waiters[SCOUTFS_LOCK_READ];
__entry->waiters_ex = lck->waiters[SCOUTFS_LOCK_WRITE];
__entry->waiters_cw = lck->waiters[SCOUTFS_LOCK_WRITE_ONLY];
@@ -1068,10 +1125,11 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
__entry->users_ex = lck->users[SCOUTFS_LOCK_WRITE];
__entry->users_cw = lck->users[SCOUTFS_LOCK_WRITE_ONLY];
),
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u reqpnd %u invpnd %u rfrgen %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u invmd %u reqp %u invp %u refg %llu wris %llu dts %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
__entry->mode, __entry->request_pending,
__entry->invalidate_pending, __entry->refresh_gen,
__entry->mode, __entry->invalidating_mode, __entry->request_pending,
__entry->invalidate_pending, __entry->refresh_gen, __entry->write_seq,
__entry->dirty_trans_seq,
__entry->waiters_pr, __entry->waiters_ex, __entry->waiters_cw,
__entry->users_pr, __entry->users_ex, __entry->users_cw)
);
@@ -1910,15 +1968,17 @@ DEFINE_EVENT(scoutfs_server_client_count_class, scoutfs_server_client_down,
);

DECLARE_EVENT_CLASS(scoutfs_server_commit_users_class,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, committing,
exceeded),
TP_PROTO(struct super_block *sb, int holding, int applying,
int nr_holders, u32 budget,
u32 avail_before, u32 freed_before,
int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, holding)
__field(int, applying)
__field(int, nr_holders)
__field(u32, budget)
__field(__u32, avail_before)
__field(__u32, freed_before)
__field(int, committing)
@@ -1929,35 +1989,45 @@ DECLARE_EVENT_CLASS(scoutfs_server_commit_users_class,
__entry->holding = !!holding;
__entry->applying = !!applying;
__entry->nr_holders = nr_holders;
__entry->budget = budget;
__entry->avail_before = avail_before;
__entry->freed_before = freed_before;
__entry->committing = !!committing;
__entry->exceeded = !!exceeded;
),
TP_printk(SCSBF" holding %u applying %u nr %u avail_before %u freed_before %u committing %u exceeded %u",
SCSB_TRACE_ARGS, __entry->holding, __entry->applying, __entry->nr_holders,
__entry->avail_before, __entry->freed_before, __entry->committing,
__entry->exceeded)
TP_printk(SCSBF" holding %u applying %u nr %u budget %u avail_before %u freed_before %u committing %u exceeded %u",
SCSB_TRACE_ARGS, __entry->holding, __entry->applying,
__entry->nr_holders, __entry->budget,
__entry->avail_before, __entry->freed_before,
__entry->committing, __entry->exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_hold,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, committing, exceeded)
TP_PROTO(struct super_block *sb, int holding, int applying,
int nr_holders, u32 budget,
u32 avail_before, u32 freed_before,
int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_apply,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, committing, exceeded)
TP_PROTO(struct super_block *sb, int holding, int applying,
int nr_holders, u32 budget,
u32 avail_before, u32 freed_before,
int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_start,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, committing, exceeded)
TP_PROTO(struct super_block *sb, int holding, int applying,
int nr_holders, u32 budget,
u32 avail_before, u32 freed_before,
int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
);
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_end,
TP_PROTO(struct super_block *sb, int holding, int applying, int nr_holders,
u32 avail_before, u32 freed_before, int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, avail_before, freed_before, committing, exceeded)
TP_PROTO(struct super_block *sb, int holding, int applying,
int nr_holders, u32 budget,
u32 avail_before, u32 freed_before,
int committing, int exceeded),
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
);

#define slt_symbolic(mode) \
@@ -2395,10 +2465,69 @@ TRACE_EVENT(scoutfs_block_dirty_ref,
__entry->block_blkno, __entry->block_seq)
);

TRACE_EVENT(scoutfs_get_file_block,
TP_PROTO(struct super_block *sb, u64 blkno, int flags),

TP_ARGS(sb, blkno, flags),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, blkno)
__field(int, flags)
),

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->blkno = blkno;
__entry->flags = flags;
),

TP_printk(SCSBF" blkno %llu flags 0x%x",
SCSB_TRACE_ARGS, __entry->blkno, __entry->flags)
);

TRACE_EVENT(scoutfs_block_stale,
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *ref,
struct scoutfs_block_header *hdr, u32 magic, u32 crc),

TP_ARGS(sb, ref, hdr, magic, crc),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(__u64, ref_blkno)
__field(__u64, ref_seq)
__field(__u32, hdr_crc)
__field(__u32, hdr_magic)
__field(__u64, hdr_fsid)
__field(__u64, hdr_seq)
__field(__u64, hdr_blkno)
__field(__u32, magic)
__field(__u32, crc)
),

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ref_blkno = le64_to_cpu(ref->blkno);
__entry->ref_seq = le64_to_cpu(ref->seq);
__entry->hdr_crc = le32_to_cpu(hdr->crc);
__entry->hdr_magic = le32_to_cpu(hdr->magic);
__entry->hdr_fsid = le64_to_cpu(hdr->fsid);
__entry->hdr_seq = le64_to_cpu(hdr->seq);
__entry->hdr_blkno = le64_to_cpu(hdr->blkno);
__entry->magic = magic;
__entry->crc = crc;
),

TP_printk(SCSBF" ref_blkno %llu ref_seq %016llx hdr_crc %08x hdr_magic %08x hdr_fsid %016llx hdr_seq %016llx hdr_blkno %llu magic %08x crc %08x",
SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq, __entry->hdr_crc,
__entry->hdr_magic, __entry->hdr_fsid, __entry->hdr_seq, __entry->hdr_blkno,
__entry->magic, __entry->crc)
);

DECLARE_EVENT_CLASS(scoutfs_block_class,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
unsigned long bits, __u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed),
unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(void *, bp)
@@ -2406,7 +2535,6 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
__field(int, refcount)
__field(int, io_count)
__field(long, bits)
__field(__u64, accessed)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
@@ -2415,71 +2543,65 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
__entry->refcount = refcount;
__entry->io_count = io_count;
__entry->bits = bits;
__entry->accessed = accessed;
),
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx accessed %llu",
TP_printk(SCSBF" bp %p blkno %llu refcount %x io_count %d bits 0x%lx",
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno, __entry->refcount,
__entry->io_count, __entry->bits, __entry->accessed)
__entry->io_count, __entry->bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_allocate,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_free,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_insert,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_remove,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_end_io,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_submit,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_invalidate,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_mark_dirty,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_forget,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_shrink,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits,
__u64 accessed),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_isolate,
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
int refcount, int io_count, unsigned long bits),
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
);

DECLARE_EVENT_CLASS(scoutfs_ext_next_class,
@@ -2954,6 +3076,27 @@ DEFINE_EVENT(scoutfs_srch_compact_class, scoutfs_srch_compact_client_recv,
TP_ARGS(sb, sc)
);

TRACE_EVENT(scoutfs_ioc_search_xattrs,
TP_PROTO(struct super_block *sb, u64 ino, u64 last_ino),

TP_ARGS(sb, ino, last_ino),

TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(u64, ino)
__field(u64, last_ino)
),

TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->ino = ino;
__entry->last_ino = last_ino;
),

TP_printk(SCSBF" ino %llu last_ino %llu", SCSB_TRACE_ARGS,
__entry->ino, __entry->last_ino)
);

#endif /* _TRACE_SCOUTFS_H */

/* This part must be outside protection */
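Once the module is loaded, the new tracepoints can be exercised through tracefs in the usual way, for example by writing 1 to events/scoutfs/scoutfs_ioc_search_xattrs/enable under /sys/kernel/tracing; this assumes the events register under a scoutfs trace system, which these hunks don't show.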
@@ -65,6 +65,7 @@ struct commit_users {
struct list_head holding;
struct list_head applying;
unsigned int nr_holders;
u32 budget;
u32 avail_before;
u32 freed_before;
bool committing;
@@ -84,8 +85,9 @@ static void init_commit_users(struct commit_users *cusers)
do { \
__typeof__(cusers) _cusers = (cusers); \
trace_scoutfs_server_commit_##which(sb, !list_empty(&_cusers->holding), \
!list_empty(&_cusers->applying), _cusers->nr_holders, _cusers->avail_before, \
_cusers->freed_before, _cusers->committing, _cusers->exceeded); \
!list_empty(&_cusers->applying), _cusers->nr_holders, _cusers->budget, \
_cusers->avail_before, _cusers->freed_before, _cusers->committing, \
_cusers->exceeded); \
} while (0)

struct server_info {
@@ -298,12 +300,11 @@ static void check_holder_budget(struct super_block *sb, struct server_info *serv
{
static bool exceeded_once = false;
struct commit_hold *hold;
struct timespec ts;
struct timespec64 ts;
u32 avail_used;
u32 freed_used;
u32 avail_now;
u32 freed_now;
u32 budget;

assert_spin_locked(&cusers->lock);

@@ -318,19 +319,18 @@ static void check_holder_budget(struct super_block *sb, struct server_info *serv
else
freed_used = SCOUTFS_ALLOC_LIST_MAX_BLOCKS - freed_now;

budget = cusers->nr_holders * COMMIT_HOLD_ALLOC_BUDGET;
if (avail_used <= budget && freed_used <= budget)
if (avail_used <= cusers->budget && freed_used <= cusers->budget)
return;

exceeded_once = true;
cusers->exceeded = cusers->nr_holders;

scoutfs_err(sb, "%u holders exceeded alloc budget av: bef %u now %u, fr: bef %u now %u",
cusers->nr_holders, cusers->avail_before, avail_now,
scoutfs_err(sb, "holders exceeded alloc budget %u av: bef %u now %u, fr: bef %u now %u",
cusers->budget, cusers->avail_before, avail_now,
cusers->freed_before, freed_now);

list_for_each_entry(hold, &cusers->holding, entry) {
ts = ktime_to_timespec(hold->start);
ts = ktime_to_timespec64(hold->start);
scoutfs_err(sb, "exceeding hold start %llu.%09llu av %u fr %u",
(u64)ts.tv_sec, (u64)ts.tv_nsec, hold->avail, hold->freed);
hold->exceeded = true;
@@ -349,7 +349,7 @@ static bool hold_commit(struct super_block *sb, struct server_info *server,
{
bool has_room;
bool held;
u32 budget;
u32 new_budget;
u32 av;
u32 fr;

@@ -367,8 +367,8 @@ static bool hold_commit(struct super_block *sb, struct server_info *server,
}

/* +2 for our additional hold and then for the final commit work the server does */
budget = (cusers->nr_holders + 2) * COMMIT_HOLD_ALLOC_BUDGET;
has_room = av >= budget && fr >= budget;
new_budget = max(cusers->budget, (cusers->nr_holders + 2) * COMMIT_HOLD_ALLOC_BUDGET);
has_room = av >= new_budget && fr >= new_budget;
/* checking applying so holders drain once an apply caller starts waiting */
held = !cusers->committing && has_room && list_empty(&cusers->applying);

@@ -388,6 +388,7 @@ static bool hold_commit(struct super_block *sb, struct server_info *server,
list_add_tail(&hold->entry, &cusers->holding);

cusers->nr_holders++;
cusers->budget = new_budget;

} else if (!has_room && cusers->nr_holders == 0 && !cusers->committing) {
cusers->committing = true;
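A worked example of the new budget logic: with two current holders, a third caller computes new_budget = max(cusers->budget, (2 + 2) * COMMIT_HOLD_ALLOC_BUDGET), so the budget only ratchets upward while holders accumulate (the max() keeps it from shrinking as holders exit) and is compared against both avail and freed room before the hold is granted; commit_end() later resets the accumulated budget to zero for the next commit cycle.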
@@ -445,7 +446,7 @@ static int server_apply_commit(struct super_block *sb, struct commit_hold *hold,
{
DECLARE_SERVER_INFO(sb, server);
struct commit_users *cusers = &server->cusers;
struct timespec ts;
struct timespec64 ts;

spin_lock(&cusers->lock);

@@ -454,7 +455,7 @@ static int server_apply_commit(struct super_block *sb, struct commit_hold *hold,
check_holder_budget(sb, server, cusers);

if (hold->exceeded) {
ts = ktime_to_timespec(hold->start);
ts = ktime_to_timespec64(hold->start);
scoutfs_err(sb, "exceeding hold start %llu.%09llu stack:",
(u64)ts.tv_sec, (u64)ts.tv_nsec);
dump_stack();
@@ -516,6 +517,7 @@ static void commit_end(struct super_block *sb, struct commit_users *cusers, int
list_for_each_entry_safe(hold, tmp, &cusers->applying, entry)
list_del_init(&hold->entry);
cusers->committing = false;
cusers->budget = 0;
spin_unlock(&cusers->lock);

wake_up(&cusers->waitq);
@@ -608,7 +610,7 @@ static void scoutfs_server_commit_func(struct work_struct *work)
goto out;

if (scoutfs_forcing_unmount(sb)) {
ret = -EIO;
ret = -ENOLINK;
goto out;
}

@@ -1038,6 +1040,101 @@ static int next_log_merge_item(struct super_block *sb,
return next_log_merge_item_key(sb, root, zone, &key, val, val_len);
}

static int do_finalize_ours(struct super_block *sb,
struct scoutfs_log_trees *lt,
struct commit_hold *hold)
{
struct server_info *server = SCOUTFS_SB(sb)->server_info;
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
struct scoutfs_key key;
char *err_str = NULL;
u64 rid = le64_to_cpu(lt->rid);
bool more;
int ret;
int err;

mutex_lock(&server->srch_mutex);
ret = scoutfs_srch_rotate_log(sb, &server->alloc, &server->wri,
&super->srch_root, &lt->srch_file, true);
mutex_unlock(&server->srch_mutex);
if (ret < 0) {
scoutfs_err(sb, "error rotating srch log for rid %016llx: %d",
rid, ret);
return ret;
}

do {
more = false;

/*
* All of these can return errors, perhaps indicating successful
* partial progress, after having modified the allocator trees.
* We always have to update the roots in the log item.
*/
mutex_lock(&server->alloc_mutex);
ret = (err_str = "splice meta_freed to other_freed",
scoutfs_alloc_splice_list(sb, &server->alloc,
&server->wri, server->other_freed,
&lt->meta_freed)) ?:
(err_str = "splice meta_avail",
scoutfs_alloc_splice_list(sb, &server->alloc,
&server->wri, server->other_freed,
&lt->meta_avail)) ?:
(err_str = "empty data_avail",
alloc_move_empty(sb, &super->data_alloc,
&lt->data_avail,
COMMIT_HOLD_ALLOC_BUDGET / 2)) ?:
(err_str = "empty data_freed",
alloc_move_empty(sb, &super->data_alloc,
&lt->data_freed,
COMMIT_HOLD_ALLOC_BUDGET / 2));
mutex_unlock(&server->alloc_mutex);

/*
* only finalize, allowing merging, once the allocators are
* fully freed
*/
if (ret == 0) {
/* the transaction is no longer open */
le64_add_cpu(&lt->flags, SCOUTFS_LOG_TREES_FINALIZED);
lt->finalize_seq = cpu_to_le64(scoutfs_server_next_seq(sb));
}

scoutfs_key_init_log_trees(&key, rid, le64_to_cpu(lt->nr));

err = scoutfs_btree_update(sb, &server->alloc, &server->wri,
&super->logs_root, &key, lt,
sizeof(*lt));
BUG_ON(err != 0); /* alloc, log, srch items out of sync */

if (ret == -EINPROGRESS) {
more = true;
mutex_unlock(&server->logs_mutex);
ret = server_apply_commit(sb, hold, 0);
if (ret < 0)
WARN_ON_ONCE(ret < 0);
server_hold_commit(sb, hold);
mutex_lock(&server->logs_mutex);
} else if (ret == 0) {
memset(&lt->item_root, 0, sizeof(lt->item_root));
memset(&lt->bloom_ref, 0, sizeof(lt->bloom_ref));
lt->inode_count_delta = 0;
lt->max_item_seq = 0;
lt->finalize_seq = 0;
le64_add_cpu(&lt->nr, 1);
lt->flags = 0;
}
} while (more);

if (ret < 0) {
scoutfs_err(sb,
"error %d finalizing log trees for rid %016llx: %s",
ret, rid, err_str);
}

return ret;
}
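do_finalize_ours() chains its steps with the comma-plus-?: idiom so err_str always names the first step that failed. Reduced to its shape, with hypothetical step functions standing in for the real calls:

	/* sketch of the error-labelling idiom used above */
	ret = (err_str = "first step",  do_first_step(sb)) ?:
	      (err_str = "second step", do_second_step(sb));
	if (ret < 0)
		scoutfs_err(sb, "error %d: %s", ret, err_str);

Each comma expression assigns the label and then evaluates to the step's return value, and ?: short-circuits at the first nonzero result, leaving err_str pointing at the failing step.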
/*
* Finalizing the log btrees for merging needs to be done carefully so
* that items don't appear to go backwards in time.
@@ -1089,7 +1186,6 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
struct scoutfs_log_merge_range rng;
struct scoutfs_mount_options opts;
struct scoutfs_log_trees each_lt;
struct scoutfs_log_trees fin;
unsigned int delay_ms;
unsigned long timeo;
bool saw_finalized;
@@ -1160,6 +1256,7 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
/* done if we're not finalizing and there's no finalized */
if (!finalize_ours && !saw_finalized) {
ret = 0;
scoutfs_inc_counter(sb, log_merge_no_finalized);
break;
}

@@ -1194,32 +1291,11 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l

/* Finalize ours if it's visible to others */
if (ours_visible) {
fin = *lt;
memset(&fin.meta_avail, 0, sizeof(fin.meta_avail));
memset(&fin.meta_freed, 0, sizeof(fin.meta_freed));
memset(&fin.data_avail, 0, sizeof(fin.data_avail));
memset(&fin.data_freed, 0, sizeof(fin.data_freed));
memset(&fin.srch_file, 0, sizeof(fin.srch_file));
le64_add_cpu(&fin.flags, SCOUTFS_LOG_TREES_FINALIZED);
fin.finalize_seq = cpu_to_le64(scoutfs_server_next_seq(sb));

scoutfs_key_init_log_trees(&key, le64_to_cpu(fin.rid),
le64_to_cpu(fin.nr));
ret = scoutfs_btree_update(sb, &server->alloc, &server->wri,
&super->logs_root, &key, &fin,
sizeof(fin));
ret = do_finalize_ours(sb, lt, hold);
if (ret < 0) {
err_str = "updating finalized log_trees";
err_str = "finalizing ours";
break;
}

memset(&lt->item_root, 0, sizeof(lt->item_root));
memset(&lt->bloom_ref, 0, sizeof(lt->bloom_ref));
lt->inode_count_delta = 0;
lt->max_item_seq = 0;
lt->finalize_seq = 0;
le64_add_cpu(&lt->nr, 1);
lt->flags = 0;
}

/* wait a bit for mounts to arrive */
@@ -1299,12 +1375,10 @@ static int finalize_and_start_log_merge(struct super_block *sb, struct scoutfs_l
* is nested inside holding commits so we recheck the persistent item
* each time we commit to make sure it's still what we think. The
* caller is still going to send the item to the client so we update the
* caller's each time we make progress. This is a best-effort attempt
* to clean up and it's valid to leave extents in data_freed we don't
* return errors to the caller. The client will continue the work later
* in get_log_trees or as the rid is reclaimed.
* caller's each time we make progress. If we hit an error applying the
* changes we make then we can't send the log_trees to the client.
*/
static void try_drain_data_freed(struct super_block *sb, struct scoutfs_log_trees *lt)
static int try_drain_data_freed(struct super_block *sb, struct scoutfs_log_trees *lt)
{
DECLARE_SERVER_INFO(sb, server);
struct scoutfs_super_block *super = DIRTY_SUPER_SB(sb);
@@ -1313,6 +1387,7 @@ static void try_drain_data_freed(struct super_block *sb, struct scoutfs_log_tree
struct scoutfs_log_trees drain;
struct scoutfs_key key;
COMMIT_HOLD(hold);
bool apply = false;
int ret = 0;
int err;

@@ -1321,22 +1396,27 @@ static void try_drain_data_freed(struct super_block *sb, struct scoutfs_log_tree
while (lt->data_freed.total_len != 0) {
server_hold_commit(sb, &hold);
mutex_lock(&server->logs_mutex);
apply = true;

ret = find_log_trees_item(sb, &super->logs_root, false, rid, U64_MAX, &drain);
if (ret < 0)
if (ret < 0) {
ret = 0;
break;
}

/* careful to only keep draining the caller's specific open trans */
if (drain.nr != lt->nr || drain.get_trans_seq != lt->get_trans_seq ||
drain.commit_trans_seq != lt->commit_trans_seq || drain.flags != lt->flags) {
ret = -ENOENT;
ret = 0;
break;
}

ret = scoutfs_btree_dirty(sb, &server->alloc, &server->wri,
&super->logs_root, &key);
if (ret < 0)
if (ret < 0) {
ret = 0;
break;
}

/* moving can modify and return errors, always update caller and item */
mutex_lock(&server->alloc_mutex);
@@ -1352,19 +1432,19 @@ static void try_drain_data_freed(struct super_block *sb, struct scoutfs_log_tree
BUG_ON(err < 0); /* dirtying must guarantee success */

mutex_unlock(&server->logs_mutex);

ret = server_apply_commit(sb, &hold, ret);
if (ret < 0) {
ret = 0; /* don't try to abort, ignoring ret */
apply = false;

if (ret < 0)
break;
}
}

/* try to cleanly abort and write any partial dirty btree blocks, but ignore result */
if (ret < 0) {
if (apply) {
mutex_unlock(&server->logs_mutex);
server_apply_commit(sb, &hold, 0);
server_apply_commit(sb, &hold, ret);
}

return ret;
}

/*
@@ -1572,9 +1652,9 @@ out:
scoutfs_err(sb, "error %d getting log trees for rid %016llx: %s",
ret, rid, err_str);

/* try to drain excessive data_freed with additional commits, if needed, ignoring err */
/* try to drain excessive data_freed with additional commits, if needed */
if (ret == 0)
try_drain_data_freed(sb, &lt);
ret = try_drain_data_freed(sb, &lt);

return scoutfs_net_response(sb, conn, cmd, id, ret, &lt, sizeof(lt));
}
@@ -1674,8 +1754,8 @@ unlock:

ret = server_apply_commit(sb, &hold, ret);
if (ret < 0)
scoutfs_err(sb, "server error %d committing client logs for rid %016llx: %s",
ret, rid, err_str);
scoutfs_err(sb, "server error %d committing client logs for rid %016llx, nr %llu: %s",
ret, rid, le64_to_cpu(lt.nr), err_str);
out:
WARN_ON_ONCE(ret < 0);
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
@@ -1810,6 +1890,9 @@ static int reclaim_open_log_tree(struct super_block *sb, u64 rid)
out:
mutex_unlock(&server->logs_mutex);

if (ret == 0)
scoutfs_inc_counter(sb, reclaimed_open_logs);

if (ret < 0 && ret != -EINPROGRESS)
scoutfs_err(sb, "server error %d reclaiming log trees for rid %016llx: %s",
ret, rid, err_str);
@@ -2051,7 +2134,7 @@ static int server_srch_commit_compact(struct super_block *sb,
&super->srch_root, rid, sc,
&av, &fr);
mutex_unlock(&server->srch_mutex);
if (ret < 0) /* XXX very bad, leaks allocators */
if (ret < 0)
goto apply;

/* reclaim allocators if they were set by _srch_commit_ */
@@ -2061,10 +2144,10 @@ static int server_srch_commit_compact(struct super_block *sb,
scoutfs_alloc_splice_list(sb, &server->alloc, &server->wri,
server->other_freed, &fr);
mutex_unlock(&server->alloc_mutex);
WARN_ON(ret < 0); /* XXX leaks allocators */
apply:
ret = server_apply_commit(sb, &hold, ret);
out:
WARN_ON(ret < 0); /* XXX leaks allocators */
return scoutfs_net_response(sb, conn, cmd, id, ret, NULL, 0);
}

@@ -2527,7 +2610,7 @@ static void server_log_merge_free_work(struct work_struct *work)

ret = scoutfs_btree_free_blocks(sb, &server->alloc,
&server->wri, &fr.key,
&fr.root, COMMIT_HOLD_ALLOC_BUDGET / 2);
&fr.root, COMMIT_HOLD_ALLOC_BUDGET / 8);
if (ret < 0) {
err_str = "freeing log btree";
break;
@@ -2546,7 +2629,7 @@ static void server_log_merge_free_work(struct work_struct *work)
/* freed blocks are in allocator, we *have* to update fr */
BUG_ON(ret < 0);

if (server_hold_alloc_used_since(sb, &hold) >= COMMIT_HOLD_ALLOC_BUDGET / 2) {
if (server_hold_alloc_used_since(sb, &hold) >= (COMMIT_HOLD_ALLOC_BUDGET * 3) / 4) {
mutex_unlock(&server->logs_mutex);
ret = server_apply_commit(sb, &hold, ret);
commit = false;
@@ -4149,7 +4232,7 @@ static void fence_pending_recov_worker(struct work_struct *work)
struct server_info *server = container_of(work, struct server_info,
fence_pending_recov_work);
struct super_block *sb = server->sb;
union scoutfs_inet_addr addr;
union scoutfs_inet_addr addr = {{0,}};
u64 rid = 0;
int ret = 0;
kmod/src/sparse-filtered.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash

#
# Unfortunately, kernels can ship which contain sparse errors that are
# unrelated to us.
#
# The exit status of this filtering wrapper will indicate an error if
# sparse wasn't found or if there were any unfiltered output lines. It
# can hide error exit status from sparse or grep if they don't produce
# output that makes it past the filters.
#

# must have sparse. Fail with error message, mask success path.
which sparse > /dev/null || exit 1

# initial unmatchable, additional added as RE+="|..."
RE="$^"

#
# Darn. sparse has multi-line error messages, and I'd rather not bother
# with multi-line filters. So we'll just drop this context.
#
# command-line: note: in included file (through include/linux/netlink.h, include/linux/ethtool.h, include/linux/netdevice.h, include/net/sock.h, /root/scoutfs/kmod/src/kernelcompat.h, builtin):
# fprintf(stderr, "%s: note: in included file%s:\n",
#
RE+="|: note: in included file"

# 3.10.0-1160.119.1.el7.x86_64.debug
# include/linux/posix_acl.h:138:9: warning: incorrect type in assignment (different address spaces)
# include/linux/posix_acl.h:138:9: expected struct posix_acl *<noident>
# include/linux/posix_acl.h:138:9: got struct posix_acl [noderef] <asn:4>*<noident>
RE+="|include/linux/posix_acl.h:"

# 3.10.0-1160.119.1.el7.x86_64.debug
#include/uapi/linux/perf_event.h:146:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
RE+="|include/uapi/linux/perf_event.h:"

# 4.18.0-513.24.1.el8_9.x86_64+debug'
#./include/linux/skbuff.h:824:1: warning: directive in macro's argument list
RE+="|include/linux/skbuff.h:"

sparse "$@" |& \
grep -E -v "($RE)" |& \
awk '{ print $0 } END { exit NR > 0 }'
exit $?
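If the kmod build routes source checking through kbuild's CHECK hook, something like make C=1 CHECK=src/sparse-filtered.sh would filter sparse output through these expressions; whether the scoutfs Makefile wires that up is not shown in this diff. The final awk stage both echoes any surviving lines and turns their presence into a nonzero exit status.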
kmod/src/srch.c (150 lines changed)
@@ -18,6 +18,7 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <asm/unaligned.h>

#include "super.h"
#include "format.h"
@@ -61,7 +62,7 @@
* re-allocated and re-written. Search can restart by checking the
* btree for the current set of files. Compaction reads log files which
* are protected from other compactions by the persistent busy items
* created by the server. Compaction won't see it's blocks reused out
* created by the server. Compaction won't see its blocks reused out
* from under it, but it can encounter stale cached blocks that need to
* be invalidated.
*/
@@ -441,6 +442,10 @@ out:
if (ret == 0 && (flags & GFB_INSERT) && blk >= le64_to_cpu(sfl->blocks))
sfl->blocks = cpu_to_le64(blk + 1);

if (bl) {
trace_scoutfs_get_file_block(sb, bl->blkno, flags);
}

*bl_ret = bl;
return ret;
}
@@ -532,23 +537,35 @@ out:
* the pairs cancel each other out by all readers (the second encoding
* looks like deletion) so they aren't visible to the first/last bounds of
* the block or file.
*
* We use the same entry repeatedly, so the diff between them will be empty.
* This lets us just emit the two-byte count word, leaving the other bytes
* as zero.
*
* Split the desired total len into two pieces, adding any remainder to the
* first four-bit value.
*/
static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
struct scoutfs_srch_block *srb, struct scoutfs_srch_entry *sre)
static void append_padded_entry(struct scoutfs_srch_file *sfl,
struct scoutfs_srch_block *srb,
int len)
{
int ret;
int each;
int rem;
u16 lengths = 0;
u8 *buf = srb->entries + le32_to_cpu(srb->entry_bytes);

ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
sre, &srb->tail);
if (ret > 0) {
srb->tail = *sre;
le32_add_cpu(&srb->entry_nr, 1);
le32_add_cpu(&srb->entry_bytes, ret);
le64_add_cpu(&sfl->entries, 1);
ret = 0;
}
each = (len - 2) >> 1;
rem = (len - 2) & 1;

return ret;
lengths |= each + rem;
lengths |= each << 4;

memset(buf, 0, len);
put_unaligned_le16(lengths, buf);

le32_add_cpu(&srb->entry_nr, 1);
le32_add_cpu(&srb->entry_bytes, len);
le64_add_cpu(&sfl->entries, 1);
}
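A worked example of the split: for len = 9, each = (9 - 2) >> 1 = 3 and rem = 1, so the low four-bit value is 4 and the high is 3 (lengths = 0x34), and the two-byte count word plus 4 + 3 zero bytes consume exactly 9 bytes; for len = 10 both nibbles are 4 and the padded entry consumes exactly 10 bytes.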
/*
@@ -559,61 +576,41 @@ static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
* This is called when there is a single existing entry in the block.
* We have the entire block to work with. We encode pairs of matching
* entries. This hides them from readers (both searches and merging) as
* they're interpreted as creation and deletion and are deleted. We use
* the existing hash value of the first entry in the block but then set
* the inode to an impossibly large number so it doesn't interfere with
* anything.
* they're interpreted as creation and deletion and are deleted.
*
* To hit the specific offset we very carefully manage the amount of
* bytes of change between fields in the entry. We know that if we
* change all the byte of the ino and id we end up with a 20 byte
* (2+8+8,2) encoding of the pair of entries. To have the last entry
* start at the _SAFE_POS offset we know that the final 20 byte pair
* encoding needs to end at 2 bytes (second entry encoding) after the
* _SAFE_POS offset.
* For simplicity and to maintain sort ordering within the block, we reuse
* the existing entry. This lets us skip the encoding step, because we know
* the diff will be zero. We can zero-pad the resulting entries to hit the
* target offset exactly.
*
* So as we encode pairs we watch the delta of our current offset from
* that desired final offset of 2 past _SAFE_POS. If we're a multiple
* of 20 away then we encode the full 20 byte pairs. If we're not, then
* we drop a byte to encode 19 bytes. That'll slowly change the offset
* to be a multiple of 20 again while encoding large entries.
* Because we can't predict the exact number of entry_bytes when we start,
* we adjust the byte count of subsequent entries until we wind up at a
* multiple of 20 bytes away from our goal and then use that length for
* the remaining entries.
*
* We could just use a single pair of unnaturally large entries to consume
* the needed space, adjusting for an odd number of entry_bytes if necessary.
* The use of 19 or 20 bytes for the entry pair matches what we would see with
* real (non-zero) entries that vary from the existing entry.
*/
static void pad_entries_at_safe(struct scoutfs_srch_file *sfl, u64 blk,
static void pad_entries_at_safe(struct scoutfs_srch_file *sfl,
struct scoutfs_srch_block *srb)
{
struct scoutfs_srch_entry sre;
u32 target;
s32 diff;
u64 hash;
u64 ino;
u64 id;
int ret;

hash = le64_to_cpu(srb->tail.hash);
ino = le64_to_cpu(srb->tail.ino) | (1ULL << 62);
id = le64_to_cpu(srb->tail.id);

target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;

while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
ino ^= 1ULL << (7 * 8);
append_padded_entry(sfl, srb, 10);
if (diff % 20 == 0) {
id ^= 1ULL << (7 * 8);
append_padded_entry(sfl, srb, 10);
} else {
id ^= 1ULL << (6 * 8);
append_padded_entry(sfl, srb, 9);
}

sre.hash = cpu_to_le64(hash);
sre.ino = cpu_to_le64(ino);
sre.id = cpu_to_le64(id);

ret = append_padded_entry(sfl, blk, srb, &sre);
if (ret == 0)
ret = append_padded_entry(sfl, blk, srb, &sre);
BUG_ON(ret != 0);

diff = target - le32_to_cpu(srb->entry_bytes);
}

WARN_ON_ONCE(diff != 0);
}
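Tracing the loop above: each iteration appends a pair costing either 10 + 10 = 20 bytes, when the remaining diff is already a multiple of 20, or 10 + 9 = 19 bytes, which nudges an odd remainder back toward a multiple of 20. The entry_bytes count therefore lands exactly on SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2 and the final WARN_ON_ONCE(diff != 0) stays quiet.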
|
||||
/*
|
||||
@@ -748,14 +745,14 @@ static int search_log_file(struct super_block *sb,
|
||||
for (i = 0; i < le32_to_cpu(srb->entry_nr); i++) {
|
||||
if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = decode_entry(srb->entries + pos, &sre, &prev);
|
||||
if (ret <= 0) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
@@ -858,15 +855,15 @@ static int search_sorted_file(struct super_block *sb,
|
||||
|
||||
if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
break;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = decode_entry(srb->entries + pos, &sre, &prev);
|
||||
if (ret <= 0) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
break;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
pos += ret;
|
||||
prev = sre;
|
||||
@@ -971,6 +968,8 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
|
||||
scoutfs_inc_counter(sb, srch_search_xattrs);
|
||||
|
||||
trace_scoutfs_ioc_search_xattrs(sb, ino, last_ino);
|
||||
|
||||
*done = false;
|
||||
srch_init_rb_root(sroot);
|
||||
|
||||
@@ -1407,7 +1406,7 @@ int scoutfs_srch_commit_compact(struct super_block *sb,
			ret = -EIO;
		scoutfs_btree_put_iref(&iref);
	}
	if (ret < 0) /* XXX leaks allocators */
	if (ret < 0)
		goto out;

	/* restore busy to pending if the operation failed */
@@ -1427,10 +1426,8 @@ int scoutfs_srch_commit_compact(struct super_block *sb,
	/* update file references if we finished compaction (!deleting) */
	if (!(res->flags & SCOUTFS_SRCH_COMPACT_FLAG_DELETE)) {
		ret = commit_files(sb, alloc, wri, root, res);
		if (ret < 0) {
			/* XXX we can't commit, shutdown? */
		if (ret < 0)
			goto out;
		}

		/* transition flags for deleting input files */
		for (i = 0; i < res->nr; i++) {
@@ -1457,7 +1454,7 @@ update:
					     le64_to_cpu(pending->id), 0);
		ret = scoutfs_btree_insert(sb, alloc, wri, root, &key,
					   pending, sizeof(*pending));
		if (ret < 0)
		if (WARN_ON_ONCE(ret < 0)) /* XXX inconsistency */
			goto out;
	}

@@ -1470,7 +1467,6 @@ update:
		BUG_ON(err); /* both busy and pending present */
	}
out:
	WARN_ON_ONCE(ret < 0); /* XXX inconsistency */
	kfree(busy);
	return ret;
}
@@ -1588,8 +1584,7 @@ static int kway_merge(struct super_block *sb,
	nr_parents = max_t(unsigned long, 1, roundup_pow_of_two(nr) - 1);
	/* root at [1] for easy sib/parent index calc, final pad for odd sib */
	nr_nodes = 1 + nr_parents + nr + 1;
	tnodes = __vmalloc(nr_nodes * sizeof(struct tourn_node),
			   GFP_NOFS, PAGE_KERNEL);
	tnodes = kc__vmalloc(nr_nodes * sizeof(struct tourn_node), GFP_NOFS);
	if (!tnodes)
		return -ENOMEM;

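A tiny illustrative sketch (not from the source) of why rooting the tournament tree at tnodes[1] makes the index math trivial, as the comment above notes: a node i has parent i / 2 and sibling i ^ 1.

#include <stdio.h>

int main(void)
{
	/* with the root at index 1, parent is i / 2 and sibling is i ^ 1 */
	for (int i = 2; i <= 9; i++)
		printf("node %d: parent %d sibling %d\n", i, i / 2, i ^ 1);
	return 0;
}
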
@@ -1669,7 +1664,7 @@ static int kway_merge(struct super_block *sb,
		/* end sorted block on _SAFE offset for testing */
		if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
		    scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
			pad_entries_at_safe(sfl, blk, srb);
			pad_entries_at_safe(sfl, srb);
			scoutfs_block_put(sb, bl);
			bl = NULL;
			blk++;
@@ -1802,7 +1797,7 @@ static void swap_page_sre(void *A, void *B, int size)
 * typically, ~10x worst case).
 *
 * Because we read and sort all the input files we must perform the full
 * compaction in one operation. The server must have given us a
 * compaction in one operation. The server must have given us
 * sufficiently large avail/freed lists, otherwise we'll return ENOSPC.
 */
static int compact_logs(struct super_block *sb,
@@ -1866,14 +1861,14 @@ static int compact_logs(struct super_block *sb,

		if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
			/* can only be inconsistency :/ */
			ret = EIO;
			break;
			ret = -EIO;
			goto out;
		}

		ret = decode_entry(srb->entries + pos, sre, &prev);
		if (ret <= 0) {
			/* can only be inconsistency :/ */
			ret = EIO;
			ret = -EIO;
			goto out;
		}
		prev = *sre;
@@ -2281,12 +2276,11 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
	} else {
		ret = -EINVAL;
	}
	if (ret < 0)
		goto commit;

	ret = scoutfs_alloc_prepare_commit(sb, &alloc, &wri) ?:
	scoutfs_alloc_prepare_commit(sb, &alloc, &wri);
	if (ret == 0)
		scoutfs_block_writer_write(sb, &wri);
commit:

	/* the server won't use our partial compact if _ERROR is set */
	sc->meta_avail = alloc.avail;
	sc->meta_freed = alloc.freed;
@@ -2303,7 +2297,7 @@ out:
		scoutfs_inc_counter(sb, srch_compact_error);

	scoutfs_block_writer_forget_all(sb, &wri);
	queue_compact_work(srinf, sc->nr > 0 && ret == 0);
	queue_compact_work(srinf, sc != NULL && sc->nr > 0 && ret == 0);

	kfree(sc);
}

@@ -49,6 +49,8 @@
#include "volopt.h"
#include "fence.h"
#include "xattr.h"
#include "wkic.h"
#include "quota.h"
#include "scoutfs_trace.h"

static struct dentry *scoutfs_debugfs_root;
@@ -158,7 +160,17 @@ static void scoutfs_metadev_close(struct super_block *sb)
	 * from kill_sb->put_super.
	 */
	lockdep_off();

#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	bdev_fput(sbi->meta_bdev_file);
#else
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
	blkdev_put(sbi->meta_bdev, sb);
#else
	blkdev_put(sbi->meta_bdev, SCOUTFS_META_BDEV_MODE);
#endif
#endif

	lockdep_on();
	sbi->meta_bdev = NULL;
}
@@ -194,7 +206,9 @@ static void scoutfs_put_super(struct super_block *sb)
	scoutfs_shutdown_trans(sb);
	scoutfs_volopt_destroy(sb);
	scoutfs_client_destroy(sb);
	scoutfs_quota_destroy(sb);
	scoutfs_inode_destroy(sb);
	scoutfs_wkic_destroy(sb);
	scoutfs_item_destroy(sb);
	scoutfs_forest_destroy(sb);
	scoutfs_data_destroy(sb);
@@ -473,7 +487,11 @@ out:

static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct scoutfs_mount_options opts;
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	struct file *meta_bdev_file;
#else
	struct block_device *meta_bdev;
#endif
	struct scoutfs_sb_info *sbi;
	struct inode *inode;
	int ret;
@@ -494,9 +512,9 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)

	sbi = kzalloc(sizeof(struct scoutfs_sb_info), GFP_KERNEL);
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	if (!sbi)
		return -ENOMEM;
	sbi->sb = sb;

	ret = assign_random_id(sbi);
	if (ret < 0)
@@ -519,7 +537,27 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}

#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	/*
	 * Pass sbi as the holder, since dev_mount already passes sb, which
	 * triggers a WARN_ON because dev_mount also passes non-NULL hops.
	 * By passing sbi here we just get a simple error in our test cases.
	 */
	meta_bdev_file = bdev_file_open_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sbi, NULL);
	if (IS_ERR(meta_bdev_file)) {
		scoutfs_err(sb, "could not open metadev: error %ld",
			    PTR_ERR(meta_bdev_file));
		ret = PTR_ERR(meta_bdev_file);
		goto out;
	}
	sbi->meta_bdev_file = meta_bdev_file;
	sbi->meta_bdev = file_bdev(meta_bdev_file);
#else
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
	meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb, NULL);
#else
	meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb);
#endif
	if (IS_ERR(meta_bdev)) {
		scoutfs_err(sb, "could not open metadev: error %ld",
			    PTR_ERR(meta_bdev));
@@ -527,6 +565,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
		goto out;
	}
	sbi->meta_bdev = meta_bdev;
#endif

	ret = set_blocksize(sbi->meta_bdev, SCOUTFS_BLOCK_SM_SIZE);
	if (ret != 0) {
		scoutfs_err(sb, "failed to set metadev blocksize, returned %d",
@@ -544,7 +584,9 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
	      scoutfs_block_setup(sb) ?:
	      scoutfs_forest_setup(sb) ?:
	      scoutfs_item_setup(sb) ?:
	      scoutfs_wkic_setup(sb) ?:
	      scoutfs_inode_setup(sb) ?:
	      scoutfs_quota_setup(sb) ?:
	      scoutfs_data_setup(sb) ?:
	      scoutfs_setup_trans(sb) ?:
	      scoutfs_omap_setup(sb) ?:

@@ -30,6 +30,8 @@ struct recov_info;
struct omap_info;
struct volopt_info;
struct fence_info;
struct wkic_info;
struct squota_info;

struct scoutfs_sb_info {
	struct super_block *sb;
@@ -40,6 +42,9 @@ struct scoutfs_sb_info {
	u64 fmt_vers;

	struct block_device *meta_bdev;
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
	struct file *meta_bdev_file;
#endif

	spinlock_t next_ino_lock;

@@ -55,6 +60,8 @@ struct scoutfs_sb_info {
	struct omap_info *omap_info;
	struct volopt_info *volopt_info;
	struct item_cache_info *item_cache_info;
	struct wkic_info *wkic_info;
	struct squota_info *squota_info;
	struct fence_info *fence_info;

	/* tracks tasks waiting for data extents */
@@ -97,7 +104,11 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)
	return !!(le64_to_cpu(super_block->flags) & SCOUTFS_FLAG_IS_META_BDEV);
}

#ifdef KC_HAVE_BLK_MODE_T
#define SCOUTFS_META_BDEV_MODE (BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL)
#else
#define SCOUTFS_META_BDEV_MODE (FMODE_READ | FMODE_WRITE | FMODE_EXCL)
#endif

static inline bool scoutfs_forcing_unmount(struct super_block *sb)
{
@@ -156,4 +167,17 @@ int scoutfs_write_super(struct super_block *sb,
/* to keep this out of the ioctl.h public interface definition */
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

/*
 * Returns 0 when supported, non-zero -errno when unsupported.
 */
static inline int scoutfs_fmt_vers_unsupported(struct super_block *sb, u64 vers)
{
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);

	if (sbi && (sbi->fmt_vers < vers))
		return -EOPNOTSUPP;
	else
		return 0;
}

#endif

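For reference, this helper is how format-feature gates read at call sites; the xattr.c hunk later in this diff uses it with SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG. A schematic caller (a sketch only, not from this file):

	/* refuse a new-format feature when the mounted format predates it */
	ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG);
	if (ret < 0)
		return ret;	/* -EOPNOTSUPP on older fmt_vers */
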
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "super.h"
#include "sysfs.h"

kmod/src/totl.c
@@ -0,0 +1,90 @@
/*
 * Copyright (C) 2023 Versity Software, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/string.h>

#include "format.h"
#include "forest.h"
#include "totl.h"

void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end)
{
	scoutfs_key_set_zeros(start);
	start->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
	scoutfs_key_set_ones(end);
	end->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
}

void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg)
{
	memset(merg, 0, sizeof(struct scoutfs_totl_merging));
}

void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
				   u64 seq, u8 flags, void *val, int val_len, int fic)
{
	struct scoutfs_xattr_totl_val *tval = val;

	if (fic & FIC_FS_ROOT) {
		merg->fs_seq = seq;
		merg->fs_total = le64_to_cpu(tval->total);
		merg->fs_count = le64_to_cpu(tval->count);
	} else if (fic & FIC_FINALIZED) {
		merg->fin_seq = seq;
		merg->fin_total += le64_to_cpu(tval->total);
		merg->fin_count += le64_to_cpu(tval->count);
	} else {
		merg->log_seq = seq;
		merg->log_total += le64_to_cpu(tval->total);
		merg->log_count += le64_to_cpu(tval->count);
	}
}

/*
 * .totl. item merging has to be careful because the log btree merging
 * code can write partial results to the fs_root. This means that a
 * reader can see both cases where new finalized logs should be applied
 * to the old fs items and where old finalized logs have already been
 * applied to the partially merged fs items. Currently active logged
 * items are always applied on top of all cases.
 *
 * These cases are differentiated with a combination of sequence numbers
 * in items, the count of contributing xattrs, and a flag
 * differentiating finalized and active logged items. This lets us
 * recognize all cases, including when finalized logs were merged and
 * deleted the fs item.
 */
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count)
{
	*total = 0;
	*count = 0;

	/* start with the fs item if we have it */
	if (merg->fs_seq != 0) {
		*total = merg->fs_total;
		*count = merg->fs_count;
	}

	/* apply finalized logs if they're newer or creating */
	if (((merg->fs_seq != 0) && (merg->fin_seq > merg->fs_seq)) ||
	    ((merg->fs_seq == 0) && (merg->fin_count > 0))) {
		*total += merg->fin_total;
		*count += merg->fin_count;
	}

	/* always apply active logs which must be newer than fs and finalized */
	if (merg->log_seq > 0) {
		*total += merg->log_total;
		*count += merg->log_count;
	}
}
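To make the resolve rules concrete, here is a minimal userspace sketch with a local copy of the logic and illustrative values (not from the source):

#include <assert.h>
#include <stdint.h>

struct merging {
	uint64_t fs_seq, fs_total, fs_count;
	uint64_t fin_seq, fin_total, fin_count;
	uint64_t log_seq, log_total, log_count;
};

/* same rules as scoutfs_totl_merge_resolve() above */
static void resolve(struct merging *m, uint64_t *total, uint64_t *count)
{
	*total = m->fs_seq ? m->fs_total : 0;
	*count = m->fs_seq ? m->fs_count : 0;
	if ((m->fs_seq && m->fin_seq > m->fs_seq) ||
	    (!m->fs_seq && m->fin_count > 0)) {
		*total += m->fin_total;
		*count += m->fin_count;
	}
	if (m->log_seq > 0) {
		*total += m->log_total;
		*count += m->log_count;
	}
}

int main(void)
{
	/* fs item at seq 10 (100/5), finalized logs at seq 12 (20/1),
	 * active logs at seq 15 (3/1): finalized logs are newer so all
	 * three contributions apply */
	struct merging m = { .fs_seq = 10, .fs_total = 100, .fs_count = 5,
			     .fin_seq = 12, .fin_total = 20, .fin_count = 1,
			     .log_seq = 15, .log_total = 3, .log_count = 1 };
	uint64_t total, count;

	resolve(&m, &total, &count);
	assert(total == 123 && count == 7);

	/* finalized logs already merged into the fs item: skip them */
	m.fin_seq = 9;
	resolve(&m, &total, &count);
	assert(total == 103 && count == 6);
	return 0;
}
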
kmod/src/totl.h
@@ -0,0 +1,24 @@
#ifndef _SCOUTFS_TOTL_H_
#define _SCOUTFS_TOTL_H_

#include "key.h"

struct scoutfs_totl_merging {
	u64 fs_seq;
	u64 fs_total;
	u64 fs_count;
	u64 fin_seq;
	u64 fin_total;
	s64 fin_count;
	u64 log_seq;
	u64 log_total;
	s64 log_count;
};

void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg);
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
				   u64 seq, u8 flags, void *val, int val_len, int fic);
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count);

#endif
kmod/src/trace/quota.h
@@ -0,0 +1,143 @@

/*
 * Tracing squota_input
 */
#define SQI_FMT "[%u %llu %llu %llu]"

#define SQI_ARGS(i) \
	(i)->op, (i)->attrs[0], (i)->attrs[1], (i)->attrs[2]

#define SQI_FIELDS(pref) \
	__array(__u64, pref##_attrs, SQ_NS__NR_SELECT) \
	__field(__u8, pref##_op)

#define SQI_ASSIGN(pref, i) \
	__entry->pref##_attrs[0] = (i)->attrs[0]; \
	__entry->pref##_attrs[1] = (i)->attrs[1]; \
	__entry->pref##_attrs[2] = (i)->attrs[2]; \
	__entry->pref##_op = (i)->op;

#define SQI_ENTRY_ARGS(pref) \
	__entry->pref##_op, __entry->pref##_attrs[0], \
	__entry->pref##_attrs[1], __entry->pref##_attrs[2]

/*
 * Tracing squota_rule
 */
#define SQR_FMT "[%u %llu,%u,%x %llu,%u,%x %llu,%u,%x %u %llu]"

#define SQR_ARGS(r) \
	(r)->prio, \
	(r)->name_val[0], (r)->name_source[0], (r)->name_flags[0], \
	(r)->name_val[1], (r)->name_source[1], (r)->name_flags[1], \
	(r)->name_val[2], (r)->name_source[2], (r)->name_flags[2], \
	(r)->op, (r)->limit \

#define SQR_FIELDS(pref) \
	__array(__u64, pref##_name_val, 3) \
	__field(__u64, pref##_limit) \
	__array(__u8, pref##_name_source, 3) \
	__array(__u8, pref##_name_flags, 3) \
	__field(__u8, pref##_prio) \
	__field(__u8, pref##_op)

#define SQR_ASSIGN(pref, r) \
	__entry->pref##_name_val[0] = (r)->names[0].val; \
	__entry->pref##_name_val[1] = (r)->names[1].val; \
	__entry->pref##_name_val[2] = (r)->names[2].val; \
	__entry->pref##_limit = (r)->limit; \
	__entry->pref##_name_source[0] = (r)->names[0].source; \
	__entry->pref##_name_source[1] = (r)->names[1].source; \
	__entry->pref##_name_source[2] = (r)->names[2].source; \
	__entry->pref##_name_flags[0] = (r)->names[0].flags; \
	__entry->pref##_name_flags[1] = (r)->names[1].flags; \
	__entry->pref##_name_flags[2] = (r)->names[2].flags; \
	__entry->pref##_prio = (r)->prio; \
	__entry->pref##_op = (r)->op;

#define SQR_ENTRY_ARGS(pref) \
	__entry->pref##_prio, __entry->pref##_name_val[0], \
	__entry->pref##_name_source[0], __entry->pref##_name_flags[0], \
	__entry->pref##_name_val[1], __entry->pref##_name_source[1], \
	__entry->pref##_name_flags[1], __entry->pref##_name_val[2], \
	__entry->pref##_name_source[2], __entry->pref##_name_flags[2], \
	__entry->pref##_op, __entry->pref##_limit

TRACE_EVENT(scoutfs_quota_check,
	TP_PROTO(struct super_block *sb, long rs_ptr, struct squota_input *inp, int ret),

	TP_ARGS(sb, rs_ptr, inp, ret),

	TP_STRUCT__entry(
		SCSB_TRACE_FIELDS
		__field(long, rs_ptr)
		SQI_FIELDS(i)
		__field(int, ret)
	),

	TP_fast_assign(
		SCSB_TRACE_ASSIGN(sb);
		__entry->rs_ptr = rs_ptr;
		SQI_ASSIGN(i, inp);
		__entry->ret = ret;
	),

	TP_printk(SCSBF" rs_ptr %ld ret %d inp "SQI_FMT,
		  SCSB_TRACE_ARGS, __entry->rs_ptr, __entry->ret, SQI_ENTRY_ARGS(i))
);

DECLARE_EVENT_CLASS(scoutfs_quota_rule_op_class,
	TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),

	TP_ARGS(sb, rule, ret),

	TP_STRUCT__entry(
		SCSB_TRACE_FIELDS
		SQR_FIELDS(r)
		__field(int, ret)
	),

	TP_fast_assign(
		SCSB_TRACE_ASSIGN(sb);
		SQR_ASSIGN(r, rule);
		__entry->ret = ret;
	),

	TP_printk(SCSBF" "SQR_FMT" ret %d",
		  SCSB_TRACE_ARGS, SQR_ENTRY_ARGS(r), __entry->ret)
);
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_add_rule,
	TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
	TP_ARGS(sb, rule, ret)
);
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_del_rule,
	TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
	TP_ARGS(sb, rule, ret)
);

TRACE_EVENT(scoutfs_quota_totl_check,
	TP_PROTO(struct super_block *sb, struct squota_input *inp, struct scoutfs_key *key,
		 u64 limit, int ret),

	TP_ARGS(sb, inp, key, limit, ret),

	TP_STRUCT__entry(
		SCSB_TRACE_FIELDS
		SQI_FIELDS(i)
		sk_trace_define(k)
		__field(__u64, limit)
		__field(int, ret)
	),

	TP_fast_assign(
		SCSB_TRACE_ASSIGN(sb);
		SQI_ASSIGN(i, inp);
		sk_trace_assign(k, key);
		__entry->limit = limit;
		__entry->ret = ret;
	),

	TP_printk(SCSBF" inp "SQI_FMT" key "SK_FMT" limit %llu ret %d",
		  SCSB_TRACE_ARGS, SQI_ENTRY_ARGS(i), sk_trace_args(k), __entry->limit,
		  __entry->ret)
);
kmod/src/trace/wkic.h
@@ -0,0 +1,112 @@
DECLARE_EVENT_CLASS(scoutfs_wkic_wpage_class,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),

	TP_ARGS(sb, ptr, which, n0l, n1l, start, end),

	TP_STRUCT__entry(
		SCSB_TRACE_FIELDS
		__field(void *, ptr)
		__field(int, which)
		__field(bool, n0l)
		__field(bool, n1l)
		sk_trace_define(start)
		sk_trace_define(end)
	),

	TP_fast_assign(
		SCSB_TRACE_ASSIGN(sb);
		__entry->ptr = ptr;
		__entry->which = which;
		__entry->n0l = n0l;
		__entry->n1l = n1l;
		sk_trace_assign(start, start);
		sk_trace_assign(end, end);
	),

	TP_printk(SCSBF" ptr %p wh %d nl %u,%u start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
		  __entry->ptr, __entry->which, __entry->n0l, __entry->n1l,
		  sk_trace_args(start), sk_trace_args(end))
);

DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_alloced,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_freeing,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_found,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_trimmed,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_erased,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserting,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserted,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_shrinking,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_dropping,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_replaying,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_filled,
	TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
		 struct scoutfs_key *start, struct scoutfs_key *end),
	TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
);

TRACE_EVENT(scoutfs_wkic_read_items,
	TP_PROTO(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *start,
		 struct scoutfs_key *end),

	TP_ARGS(sb, key, start, end),

	TP_STRUCT__entry(
		SCSB_TRACE_FIELDS
		sk_trace_define(key)
		sk_trace_define(start)
		sk_trace_define(end)
	),

	TP_fast_assign(
		SCSB_TRACE_ASSIGN(sb);
		sk_trace_assign(key, key);
		sk_trace_assign(start, start);
		sk_trace_assign(end, end);
	),

	TP_printk(SCSBF" key "SK_FMT" start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
		  sk_trace_args(key), sk_trace_args(start), sk_trace_args(end))
);
@@ -159,6 +159,58 @@ static bool drained_holders(struct trans_info *tri)
	return holders == 0;
}

static int commit_current_log_trees(struct super_block *sb, char **str)
{
	DECLARE_TRANS_INFO(sb, tri);

	return (*str = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
	       (*str = "item dirty", scoutfs_item_write_dirty(sb)) ?:
	       (*str = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
	       (*str = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri)) ?:
	       (*str = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
	       (*str = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
	       (*str = "commit log trees", commit_btrees(sb)) ?:
	       scoutfs_item_write_done(sb);
}

static int get_next_log_trees(struct super_block *sb, char **str)
{
	return (*str = "get log trees", scoutfs_trans_get_log_trees(sb));
}

static int retry_forever(struct super_block *sb, int (*func)(struct super_block *sb, char **str))
{
	bool retrying = false;
	char *str;
	int ret;

	do {
		str = NULL;

		ret = func(sb, &str);
		if (ret < 0) {
			if (!retrying) {
				scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
					     str, ret);
				retrying = true;
			}

			if (scoutfs_forcing_unmount(sb)) {
				ret = -ENOLINK;
				break;
			}

			msleep(2 * MSEC_PER_SEC);

		} else if (retrying) {
			scoutfs_info(sb, "retried transaction commit succeeded");
		}

	} while (ret < 0);

	return ret;
}

/*
 * This work func is responsible for writing out all the dirty blocks
 * that make up the current dirty transaction.  It prevents writers from
@@ -184,8 +236,6 @@ void scoutfs_trans_write_func(struct work_struct *work)
	struct trans_info *tri = container_of(work, struct trans_info, write_work.work);
	struct super_block *sb = tri->sb;
	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
	bool retrying = false;
	char *s = NULL;
	int ret = 0;

	tri->task = current;
@@ -202,7 +252,7 @@
	}

	if (scoutfs_forcing_unmount(sb)) {
		ret = -EIO;
		ret = -ENOLINK;
		goto out;
	}

@@ -214,37 +264,9 @@

	scoutfs_inc_counter(sb, trans_commit_written);

	do {
		ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
		      (s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
		      (s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
		      (s = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc,
									 &tri->wri)) ?:
		      (s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
		      (s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
		      (s = "commit log trees", commit_btrees(sb)) ?:
		      scoutfs_item_write_done(sb) ?:
		      (s = "get log trees", scoutfs_trans_get_log_trees(sb));
		if (ret < 0) {
			if (!retrying) {
				scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
					     s, ret);
				retrying = true;
			}

			if (scoutfs_forcing_unmount(sb)) {
				ret = -EIO;
				break;
			}

			msleep(2 * MSEC_PER_SEC);

		} else if (retrying) {
			scoutfs_info(sb, "retried transaction commit succeeded");
		}

	} while (ret < 0);

	/* retry {commit,get}_log_trees until they succeed, can only fail when forcing unmount */
	ret = retry_forever(sb, commit_current_log_trees) ?:
	      retry_forever(sb, get_next_log_trees);
out:
	spin_lock(&tri->write_lock);
	tri->write_count++;

@@ -93,13 +93,9 @@ int scoutfs_setup_triggers(struct super_block *sb)
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++) {
		if (!debugfs_create_atomic_t(names[i], 0644, triggers->dir,
					     &triggers->atomics[i])) {
			ret = -ENOMEM;
			goto out;
		}
	}
	for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++)
		debugfs_create_atomic_t(names[i], 0644, triggers->dir,
					&triggers->atomics[i]);

	ret = 0;
out:

@@ -183,6 +183,13 @@ static void *scoutfs_tseq_seq_next(struct seq_file *m, void *v, loff_t *pos)
	ent = tseq_rb_next(ent);
	if (ent)
		*pos = ent->pos;
	else
		/*
		 * once we hit the end, *pos is never used, but it has to
		 * be updated to avoid an error in bpf_seq_read()
		 */
		(*pos)++;

	return ent;
}

kmod/src/wkic.c
File diff suppressed because it is too large
kmod/src/wkic.h
@@ -0,0 +1,19 @@
#ifndef _SCOUTFS_WKIC_H_
#define _SCOUTFS_WKIC_H_

#include "format.h"

typedef int (*wkic_iter_cb_t)(struct scoutfs_key *key, void *val, unsigned int val_len,
			      void *cb_arg);

int scoutfs_wkic_iterate(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *last,
			 struct scoutfs_key *range_start, struct scoutfs_key *range_end,
			 wkic_iter_cb_t cb, void *cb_arg);
int scoutfs_wkic_iterate_stable(struct super_block *sb, struct scoutfs_key *key,
				struct scoutfs_key *last, struct scoutfs_key *range_start,
				struct scoutfs_key *range_end, wkic_iter_cb_t cb, void *cb_arg);

int scoutfs_wkic_setup(struct super_block *sb);
void scoutfs_wkic_destroy(struct super_block *sb);

#endif
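As a usage illustration of the callback-based iteration above, here is a hypothetical caller (not from the source; it assumes the conventional contract that a callback returning 0 continues the iteration):

/* count the items visited in [key, last]; cb_arg threads caller state through */
static int count_items_cb(struct scoutfs_key *key, void *val, unsigned int val_len,
			  void *cb_arg)
{
	unsigned long *nr = cb_arg;

	(*nr)++;
	return 0;
}

A caller would then pass count_items_cb and &nr as the final two arguments to scoutfs_wkic_iterate() or scoutfs_wkic_iterate_stable().
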
kmod/src/xattr.c
@@ -81,7 +81,20 @@ static void init_xattr_key(struct scoutfs_key *key, u64 ino, u32 name_hash,

#define SCOUTFS_XATTR_PREFIX "scoutfs."
#define SCOUTFS_XATTR_PREFIX_LEN (sizeof(SCOUTFS_XATTR_PREFIX) - 1)

/*
 * We could have hidden the logic that needs this in a user-prefix
 * specific .set handler, but I wanted to make sure that we always
 * applied that logic from any call chains to _xattr_set.  The
 * additional strcmp isn't so expensive given all the rest of the work
 * we're doing in here.
 */
static inline bool is_user(const char *name)
{
	return !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

#define HIDE_TAG "hide."
#define INDX_TAG "indx."
#define SRCH_TAG "srch."
#define TOTL_TAG "totl."
#define TAG_LEN (sizeof(HIDE_TAG) - 1)
@@ -103,6 +116,9 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
	if (!strncmp(name, HIDE_TAG, TAG_LEN)) {
		if (++tgs->hide == 0)
			return -EINVAL;
	} else if (!strncmp(name, INDX_TAG, TAG_LEN)) {
		if (++tgs->indx == 0)
			return -EINVAL;
	} else if (!strncmp(name, SRCH_TAG, TAG_LEN)) {
		if (++tgs->srch == 0)
			return -EINVAL;
@@ -540,47 +556,57 @@ static int parse_totl_u64(const char *s, int len, u64 *res)
}

/*
 * non-destructive relatively quick parse of the last 3 dotted u64s that
 * make up the name of the xattr total.  -EINVAL is returned if there
 * are anything but 3 valid u64 encodings between single dots at the end
 * of the name.
 * non-destructive relatively quick parse of final dotted u64s in an
 * xattr name.  If the required number of values are found then we
 * return the number of bytes in the name that are not the final dotted
 * u64s with their dots.  -EINVAL is returned if we didn't find the
 * required number of values.
 */
static int parse_totl_key(struct scoutfs_key *key, const char *name, int name_len)
static int parse_dotted_u64s(u64 *u64s, int nr, const char *name, int name_len)
{
	u64 tot_name[3];
	int end = name_len;
	int nr = 0;
	int len;
	int ret;
	int i;
	int u;

	/* parse name elements in reverse order from end of xattr name string */
	for (i = name_len - 1; i >= 0 && nr < ARRAY_SIZE(tot_name); i--) {
	for (u = nr - 1, i = name_len - 1; u >= 0 && i >= 0; i--) {
		if (name[i] != '.')
			continue;

		len = end - (i + 1);
		ret = parse_totl_u64(&name[i + 1], len, &tot_name[nr]);
		ret = parse_totl_u64(&name[i + 1], len, &u64s[u]);
		if (ret < 0)
			goto out;

		end = i;
		nr++;
		u--;
	}

	if (nr == ARRAY_SIZE(tot_name)) {
		/* swap to account for parsing in reverse */
		swap(tot_name[0], tot_name[2]);
		scoutfs_xattr_init_totl_key(key, tot_name);
		ret = 0;
	} else {
	if (u == -1)
		ret = end;
	else
		ret = -EINVAL;
	}

out:
	return ret;
}

static int parse_totl_key(struct scoutfs_key *key, const char *name, int name_len)
{
	u64 u64s[3];
	int ret;

	ret = parse_dotted_u64s(u64s, ARRAY_SIZE(u64s), name, name_len);
	if (ret >= 0) {
		scoutfs_xattr_init_totl_key(key, u64s);
		ret = 0;
	}

	return ret;
}

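A short worked example of the parse above (hypothetical input, traced by hand):

/*
 * For the name "scoutfs.totl.1.2.3" with nr == 3, the loop walks
 * backwards from the end of the name, parsing one u64 after each dot
 * into the highest unused slot, so u64s[] ends up as {1, 2, 3} in name
 * order and the function returns 12, the length of the "scoutfs.totl"
 * prefix before the dotted values.  A name with only two trailing u64s
 * leaves u >= 0 and returns -EINVAL.
 */
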
static int apply_totl_delta(struct super_block *sb, struct scoutfs_key *key,
			    struct scoutfs_xattr_totl_val *tval, struct scoutfs_lock *lock)
{
@@ -607,6 +633,72 @@ int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len)
	return SCOUTFS_DELTA_COMBINED;
}

void scoutfs_xattr_indx_get_range(struct scoutfs_key *start, struct scoutfs_key *end)
{
	scoutfs_key_set_zeros(start);
	start->sk_zone = SCOUTFS_XATTR_INDX_ZONE;
	scoutfs_key_set_ones(end);
	end->sk_zone = SCOUTFS_XATTR_INDX_ZONE;
}

/*
 * .indx. keys are a bit funny because we're iterating over index keys
 * by major:minor:inode:xattr_id.  That doesn't map nicely to the
 * comparison precedence of the key fields.  We have to mess around a
 * little bit to get the major into the most significant key bits and
 * the low bits of xattr id into the least significant key bits.
 */
void scoutfs_xattr_init_indx_key(struct scoutfs_key *key, u8 major, u64 minor, u64 ino, u64 xid)
{
	scoutfs_key_set_zeros(key);
	key->sk_zone = SCOUTFS_XATTR_INDX_ZONE;

	key->_sk_first = cpu_to_le64(((u64)major << 56) | (minor >> 8));
	key->_sk_second = cpu_to_le64((minor << 56) | (ino >> 8));
	key->_sk_third = cpu_to_le64((ino << 56) | (xid >> 8));
	key->_sk_fourth = xid & 0xff;
}

void scoutfs_xattr_get_indx_key(struct scoutfs_key *key, u8 *major, u64 *minor, u64 *ino, u64 *xid)
{
	*major = le64_to_cpu(key->_sk_first) >> 56;
	*minor = (le64_to_cpu(key->_sk_first) << 8) | (le64_to_cpu(key->_sk_second) >> 56);
	*ino = (le64_to_cpu(key->_sk_second) << 8) | (le64_to_cpu(key->_sk_third) >> 56);
	*xid = (le64_to_cpu(key->_sk_third) << 8) | key->_sk_fourth;
}

void scoutfs_xattr_set_indx_key_xid(struct scoutfs_key *key, u64 xid)
{
	u8 major;
	u64 minor;
	u64 ino;
	u64 dummy;

	scoutfs_xattr_get_indx_key(key, &major, &minor, &ino, &dummy);
	scoutfs_xattr_init_indx_key(key, major, minor, ino, xid);
}

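A standalone sketch of the packing round trip (illustrative only: it treats the four key fields as plain 64-bit values instead of the real little-endian struct members):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t major = 0xab;
	uint64_t minor = 0x0102030405060708ULL;
	uint64_t ino = 0x1112131415161718ULL;
	uint64_t xid = 0x2122232425262728ULL;

	/* major in the top byte; each field carries the next value's overflow byte */
	uint64_t first = ((uint64_t)major << 56) | (minor >> 8);
	uint64_t second = (minor << 56) | (ino >> 8);
	uint64_t third = (ino << 56) | (xid >> 8);
	uint8_t fourth = xid & 0xff;

	/* unpacking reverses the shifts exactly */
	assert((uint8_t)(first >> 56) == major);
	assert(((first << 8) | (second >> 56)) == minor);
	assert(((second << 8) | (third >> 56)) == ino);
	assert(((third << 8) | fourth) == xid);
	return 0;
}
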
/*
 * This initial parsing of the name doesn't yet have access to an xattr
 * id to put in the key.  That's added later as the existing xattr is
 * found or a new xattr's id is allocated.
 */
static int parse_indx_key(struct scoutfs_key *key, const char *name, int name_len, u64 ino)
{
	u64 u64s[2];
	int ret;

	ret = parse_dotted_u64s(u64s, ARRAY_SIZE(u64s), name, name_len);
	if (ret < 0)
		return ret;

	if (u64s[0] > U8_MAX)
		return -EINVAL;

	scoutfs_xattr_init_indx_key(key, u64s[0], u64s[1], ino, 0);
	return 0;
}

/*
 * The confusing swiss army knife of creating, modifying, and deleting
 * xattrs.
@@ -627,7 +719,7 @@ int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len)
int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_len,
			     const void *value, size_t size, int flags,
			     const struct scoutfs_xattr_prefix_tags *tgs,
			     struct scoutfs_lock *lck, struct scoutfs_lock *totl_lock,
			     struct scoutfs_lock *lck, struct scoutfs_lock *tag_lock,
			     struct list_head *ind_locks)
{
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
@@ -635,10 +727,11 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	const u64 ino = scoutfs_ino(inode);
	struct scoutfs_xattr_totl_val tval = {0,};
	struct scoutfs_xattr *xat = NULL;
	struct scoutfs_key totl_key;
	struct scoutfs_key tag_key;
	struct scoutfs_key key;
	bool undo_srch = false;
	bool undo_totl = false;
	bool undo_indx = false;
	u8 found_parts;
	unsigned int xat_bytes_totl;
	unsigned int xat_bytes;
@@ -649,9 +742,10 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	int ret;
	int err;

	trace_scoutfs_xattr_set(sb, name_len, value, size, flags);
	trace_scoutfs_xattr_set(sb, ino, name_len, value, size, flags);

	if (WARN_ON_ONCE(tgs->totl && !totl_lock))
	if (WARN_ON_ONCE(tgs->totl && tgs->indx) ||
	    WARN_ON_ONCE((tgs->totl | tgs->indx) && !tag_lock))
		return -EINVAL;

	/* mirror the syscall's errors for large names and values */
@@ -664,10 +758,22 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	    (flags & ~(XATTR_CREATE | XATTR_REPLACE)))
		return -EINVAL;

	if ((tgs->hide | tgs->srch | tgs->totl) && !capable(CAP_SYS_ADMIN))
	if ((tgs->hide | tgs->indx | tgs->srch | tgs->totl) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (tgs->totl && ((ret = parse_totl_key(&totl_key, name, name_len)) != 0))
	if (tgs->totl && ((ret = parse_totl_key(&tag_key, name, name_len)) != 0))
		return ret;

	if (tgs->indx &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG)))
		return ret;

	if (tgs->indx && ((ret = parse_indx_key(&tag_key, name, name_len, ino)) != 0))
		return ret;

	/* retention blocks "user." xattr modification, all else allowed */
	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0 && is_user(name))
		return ret;

	/* allocate enough to always read an existing xattr's totl */
@@ -708,6 +814,12 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	/* found fields in key will also be used */
	found_parts = ret >= 0 ? xattr_nr_parts(xat) : 0;

	/* use existing xattr's id or allocate new when creating */
	if (found_parts)
		id = le64_to_cpu(key.skx_id);
	else if (value)
		id = si->next_xattr_id++;

	if (found_parts && tgs->totl) {
		/* parse old totl value before we clobber xat buf */
		val_len = ret - offsetof(struct scoutfs_xattr, name[xat->name_len]);
@@ -718,12 +830,25 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
		le64_add_cpu(&tval.total, -total);
	}

	/*
	 * indx xattrs don't have a value.  After returning an error for
	 * non-zero val length or short circuiting modifying with the
	 * same 0 length, all we're left with is creating or deleting
	 * the xattr.
	 */
	if (tgs->indx) {
		if (size != 0) {
			ret = -EINVAL;
			goto out;
		}
		if (found_parts && value) {
			ret = 0;
			goto out;
		}
	}

	/* prepare the xattr header, name, and start of value in first item */
	if (value) {
		if (found_parts)
			id = le64_to_cpu(key.skx_id);
		else
			id = si->next_xattr_id++;
		xat->name_len = name_len;
		xat->val_len = cpu_to_le16(size);
		memset(xat->__pad, 0, sizeof(xat->__pad));
@@ -741,9 +866,18 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
		le64_add_cpu(&tval.total, total);
	}

	if (tgs->indx) {
		scoutfs_xattr_set_indx_key_xid(&tag_key, id);
		if (value)
			ret = scoutfs_item_create_force(sb, &tag_key, NULL, 0, tag_lock, NULL);
		else
			ret = scoutfs_item_delete_force(sb, &tag_key, tag_lock, NULL);
		if (ret < 0)
			goto out;
		undo_indx = true;
	}

	if (tgs->srch && !(found_parts && value)) {
		if (found_parts)
			id = le64_to_cpu(key.skx_id);
		hash = scoutfs_hash64(name, name_len);
		ret = scoutfs_forest_srch_add(sb, hash, ino, id);
		if (ret < 0)
@@ -752,7 +886,7 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	}

	if (tgs->totl) {
		ret = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
		ret = apply_totl_delta(sb, &tag_key, &tval, tag_lock);
		if (ret < 0)
			goto out;
		undo_totl = true;
@@ -777,6 +911,13 @@ int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_
	ret = 0;

out:
	if (ret < 0 && undo_indx) {
		if (value)
			err = scoutfs_item_delete_force(sb, &tag_key, tag_lock, NULL);
		else
			err = scoutfs_item_create_force(sb, &tag_key, NULL, 0, tag_lock, NULL);
		BUG_ON(err); /* inconsistent */
	}
	if (ret < 0 && undo_srch) {
		err = scoutfs_forest_srch_add(sb, hash, ino, id);
		BUG_ON(err);
@@ -785,7 +926,7 @@ out:
		/* _delta() on dirty items shouldn't fail */
		tval.total = cpu_to_le64(-le64_to_cpu(tval.total));
		tval.count = cpu_to_le64(-le64_to_cpu(tval.count));
		err = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
		err = apply_totl_delta(sb, &tag_key, &tval, tag_lock);
		BUG_ON(err);
	}

@@ -801,7 +942,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name, const void
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct scoutfs_xattr_prefix_tags tgs;
	struct scoutfs_lock *totl_lock = NULL;
	struct scoutfs_lock *tag_lock = NULL;
	struct scoutfs_lock *lck = NULL;
	size_t name_len = strlen(name);
	LIST_HEAD(ind_locks);
@@ -816,8 +957,11 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name, const void
	if (ret)
		goto unlock;

	if (tgs.totl) {
		ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &totl_lock);
	if (tgs.totl || tgs.indx) {
		if (tgs.totl)
			ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &tag_lock);
		else
			ret = scoutfs_lock_xattr_indx(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &tag_lock);
		if (ret)
			goto unlock;
	}

@@ -836,7 +980,7 @@ retry:
		goto release;

	ret = scoutfs_xattr_set_locked(dentry->d_inode, name, name_len, value, size, flags, &tgs,
				       lck, totl_lock, &ind_locks);
				       lck, tag_lock, &ind_locks);
	if (ret == 0)
		scoutfs_update_inode_item(inode, lck, &ind_locks);

@@ -845,7 +989,7 @@ release:
	scoutfs_inode_index_unlock(sb, &ind_locks);
unlock:
	scoutfs_unlock(sb, lck, SCOUTFS_LOCK_WRITE);
	scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
	scoutfs_unlock(sb, tag_lock, SCOUTFS_LOCK_WRITE_ONLY);

	return ret;
}
@@ -882,7 +1026,9 @@ static int scoutfs_xattr_get_handler

static int scoutfs_xattr_set_handler
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
	(const struct xattr_handler *handler, struct dentry *dentry,
	(const struct xattr_handler *handler,
	 KC_VFS_NS_DEF
	 struct dentry *dentry,
	 struct inode *inode, const char *name, const void *value,
	 size_t size, int flags)
{
@@ -1055,14 +1201,15 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
{
	struct scoutfs_xattr_prefix_tags tgs;
	struct scoutfs_xattr *xat = NULL;
	struct scoutfs_lock *totl_lock = NULL;
	struct scoutfs_lock *tag_lock = NULL;
	struct scoutfs_xattr_totl_val tval;
	struct scoutfs_key totl_key;
	struct scoutfs_key tag_key;
	struct scoutfs_key last;
	struct scoutfs_key key;
	bool release = false;
	unsigned int bytes;
	unsigned int val_len;
	u8 locked_zone = 0;
	void *value;
	u64 total;
	u64 hash;
@@ -1108,16 +1255,32 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
				goto out;
			}

			ret = parse_totl_key(&totl_key, xat->name, xat->name_len) ?:
			ret = parse_totl_key(&tag_key, xat->name, xat->name_len) ?:
			      parse_totl_u64(value, val_len, &total);
			if (ret < 0)
				break;
		}

		if (tgs.totl && totl_lock == NULL) {
			ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, &totl_lock);
		if (tgs.indx) {
			ret = parse_indx_key(&tag_key, xat->name, xat->name_len, ino);
			if (ret < 0)
				goto out;
		}

		if ((tgs.totl || tgs.indx) && locked_zone != tag_key.sk_zone) {
			if (tag_lock) {
				scoutfs_unlock(sb, tag_lock, SCOUTFS_LOCK_WRITE_ONLY);
				tag_lock = NULL;
			}
			if (tgs.totl)
				ret = scoutfs_lock_xattr_totl(sb, SCOUTFS_LOCK_WRITE_ONLY, 0,
							      &tag_lock);
			else
				ret = scoutfs_lock_xattr_indx(sb, SCOUTFS_LOCK_WRITE_ONLY, 0,
							      &tag_lock);
			if (ret < 0)
				break;
			locked_zone = tag_key.sk_zone;
		}

		ret = scoutfs_hold_trans(sb, false);
@@ -1140,7 +1303,13 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
		if (tgs.totl) {
			tval.total = cpu_to_le64(-total);
			tval.count = cpu_to_le64(-1LL);
			ret = apply_totl_delta(sb, &totl_key, &tval, totl_lock);
			ret = apply_totl_delta(sb, &tag_key, &tval, tag_lock);
			if (ret < 0)
				break;
		}

		if (tgs.indx) {
			ret = scoutfs_item_delete_force(sb, &tag_key, tag_lock, NULL);
			if (ret < 0)
				break;
		}
@@ -1153,7 +1322,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,

	if (release)
		scoutfs_release_trans(sb);
	scoutfs_unlock(sb, totl_lock, SCOUTFS_LOCK_WRITE_ONLY);
	scoutfs_unlock(sb, tag_lock, SCOUTFS_LOCK_WRITE_ONLY);
	kfree(xat);
out:
	return ret;

@@ -3,6 +3,7 @@

struct scoutfs_xattr_prefix_tags {
	unsigned long hide:1,
		      indx:1,
		      srch:1,
		      totl:1;
};
@@ -30,4 +31,9 @@ int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name);
int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len);

void scoutfs_xattr_indx_get_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_xattr_init_indx_key(struct scoutfs_key *key, u8 major, u64 minor, u64 ino, u64 xid);
void scoutfs_xattr_get_indx_key(struct scoutfs_key *key, u8 *major, u64 *minor, u64 *ino, u64 *xid);
void scoutfs_xattr_set_indx_key_xid(struct scoutfs_key *key, u64 xid);

#endif

tests/.gitignore
@@ -9,3 +9,6 @@ src/find_xattrs
src/stage_tmpfile
src/create_xattr_loop
src/o_tmpfile_umask
src/o_tmpfile_linkat
src/mmap_stress
src/mmap_validate

tests/.xfstests-branch
@@ -0,0 +1 @@
v2022.05.01-2-g787cd20
@@ -12,7 +12,10 @@ BIN := src/createmany \
       src/find_xattrs \
       src/create_xattr_loop \
       src/fragmented_data_extents \
       src/o_tmpfile_umask
       src/o_tmpfile_umask \
       src/o_tmpfile_linkat \
       src/mmap_stress \
       src/mmap_validate

DEPS := $(wildcard src/*.d)

@@ -22,8 +25,10 @@ ifneq ($(DEPS),)
-include $(DEPS)
endif

src/mmap_stress: LIBS+=-lpthread

$(BIN): %: %.c Makefile
	gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@
	gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@ $(LIBS)

.PHONY: clean
clean:

@@ -113,9 +113,11 @@ used during the test.
| T\_EX\_META\_DEV | scratch meta bdev | -f | /dev/vdd |
| T\_EX\_DATA\_DEV | scratch data bdev | -e | /dev/vdc |
| T\_M[0-9] | mount paths | mounted per run | /mnt/test.[0-9]/ |
| T\_MODULE | built kernel module | created per run | ../kmod/src/..ko |
| T\_NR\_MOUNTS | number of mounts | -n | 3 |
| T\_O[0-9] | mount options | created per run | -o server\_addr= |
| T\_QUORUM | quorum count | -q | 2 |
| T\_EXTRA | per-test file dir | revision ctled | tests/extra/t |
| T\_TMP | per-test tmp prefix | made for test | results/tmp/t/tmp |
| T\_TMPDIR | per-test tmp dir dir | made for test | results/tmp/t |

tests/extra/xfstests/expected-results
@@ -0,0 +1,882 @@
Ran:
generic/001
generic/002
generic/004
generic/005
generic/006
generic/007
generic/008
generic/009
generic/011
generic/012
generic/013
generic/014
generic/015
generic/016
generic/018
generic/020
generic/021
generic/022
generic/023
generic/024
generic/025
generic/026
generic/028
generic/029
generic/030
generic/031
generic/032
generic/033
generic/034
generic/035
generic/037
generic/039
generic/040
generic/041
generic/050
generic/052
generic/053
generic/056
generic/057
generic/058
generic/059
generic/060
generic/061
generic/062
generic/063
generic/064
generic/065
generic/066
generic/067
generic/069
generic/070
generic/071
generic/073
generic/076
generic/078
generic/079
generic/080
generic/081
generic/082
generic/084
generic/086
generic/087
generic/088
generic/090
generic/091
generic/092
generic/094
generic/096
generic/097
generic/098
generic/099
generic/101
generic/104
generic/105
generic/106
generic/107
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/117
generic/118
generic/119
generic/120
generic/121
generic/122
generic/123
generic/124
generic/126
generic/128
generic/129
generic/130
generic/131
generic/134
generic/135
generic/136
generic/138
generic/139
generic/140
generic/141
generic/142
generic/143
generic/144
generic/145
generic/146
generic/147
generic/148
generic/149
generic/150
generic/151
generic/152
generic/153
generic/154
generic/155
generic/156
generic/157
generic/158
generic/159
generic/160
generic/161
generic/162
generic/163
generic/169
generic/171
generic/172
generic/173
generic/174
generic/177
generic/178
generic/179
generic/180
generic/181
generic/182
generic/183
generic/184
generic/185
generic/188
generic/189
generic/190
generic/191
generic/193
generic/194
generic/195
generic/196
generic/197
generic/198
generic/199
generic/200
generic/201
generic/202
generic/203
generic/205
generic/206
generic/207
generic/210
generic/211
generic/212
generic/214
generic/215
generic/216
generic/217
generic/218
generic/219
generic/220
generic/221
generic/222
generic/223
generic/225
generic/227
generic/228
generic/229
generic/230
generic/235
generic/236
generic/237
generic/238
generic/240
generic/244
generic/245
generic/246
generic/247
generic/248
generic/249
generic/250
generic/252
generic/253
generic/254
generic/255
generic/256
generic/257
generic/258
generic/259
generic/260
generic/261
generic/262
generic/263
generic/264
generic/265
generic/266
generic/267
generic/268
generic/271
generic/272
generic/276
generic/277
generic/278
generic/279
generic/281
generic/282
generic/283
generic/284
generic/286
generic/287
generic/288
generic/289
generic/290
generic/291
generic/292
generic/293
generic/294
generic/295
generic/296
generic/301
generic/302
generic/303
generic/304
generic/305
generic/306
generic/307
generic/308
generic/309
generic/312
generic/313
generic/314
generic/315
generic/316
generic/317
generic/319
generic/322
generic/324
generic/325
generic/326
generic/327
generic/328
generic/329
generic/330
generic/331
generic/332
generic/335
generic/336
generic/337
generic/341
generic/342
generic/343
generic/346
generic/348
generic/353
generic/355
generic/358
generic/359
generic/360
generic/361
generic/362
generic/363
generic/364
generic/365
generic/366
generic/367
generic/368
generic/369
generic/370
generic/371
generic/372
generic/373
generic/374
generic/375
generic/376
generic/377
generic/378
generic/379
generic/380
generic/381
generic/382
generic/383
generic/384
generic/385
generic/386
generic/389
generic/391
generic/392
generic/393
generic/394
generic/395
generic/396
generic/397
generic/398
generic/400
generic/401
generic/402
generic/403
generic/404
generic/406
generic/407
generic/408
generic/412
generic/413
generic/414
generic/417
generic/419
generic/420
generic/421
generic/422
generic/424
generic/425
generic/426
generic/427
generic/428
generic/436
generic/437
generic/439
generic/440
generic/443
generic/445
generic/446
generic/448
generic/449
generic/450
generic/451
generic/452
generic/453
generic/454
generic/456
generic/458
generic/460
generic/462
generic/463
generic/465
generic/466
generic/468
generic/469
generic/470
generic/471
generic/474
generic/477
generic/478
generic/479
generic/480
generic/481
generic/483
generic/485
generic/486
generic/487
generic/488
generic/489
generic/490
generic/491
generic/492
generic/498
generic/499
generic/501
generic/502
generic/503
generic/504
generic/505
generic/506
generic/507
generic/508
generic/509
generic/510
generic/511
generic/512
generic/513
generic/514
generic/515
generic/516
generic/517
generic/518
generic/519
generic/520
generic/523
generic/524
generic/525
generic/526
generic/527
generic/528
generic/529
generic/530
generic/531
generic/533
generic/534
generic/535
generic/536
generic/537
generic/538
generic/539
generic/540
generic/541
generic/542
generic/543
generic/544
generic/545
generic/546
generic/547
generic/548
generic/549
generic/550
generic/552
generic/553
generic/555
generic/556
generic/557
generic/566
generic/567
generic/571
generic/572
generic/573
generic/574
generic/575
generic/576
generic/577
generic/578
generic/580
generic/581
generic/582
generic/583
generic/584
generic/586
generic/587
generic/588
generic/591
generic/592
generic/593
generic/594
generic/595
generic/596
generic/597
generic/598
generic/599
generic/600
generic/601
generic/602
generic/603
generic/604
generic/605
generic/606
generic/607
generic/608
generic/609
generic/610
generic/611
generic/612
generic/613
generic/614
generic/618
generic/621
generic/623
generic/624
generic/625
generic/626
generic/628
generic/629
generic/630
generic/632
generic/634
generic/635
generic/637
generic/638
generic/639
generic/640
generic/644
generic/645
generic/646
generic/647
generic/651
generic/652
generic/653
generic/654
generic/655
generic/657
generic/658
generic/659
generic/660
generic/661
generic/662
generic/663
generic/664
generic/665
generic/666
generic/667
generic/668
generic/669
generic/673
generic/674
generic/675
generic/676
generic/677
generic/678
generic/679
generic/680
generic/681
generic/682
generic/683
generic/684
generic/685
generic/686
generic/687
generic/688
generic/689
shared/002
shared/032
Not run:
generic/008
generic/009
generic/012
generic/015
generic/016
generic/018
generic/021
generic/022
generic/025
generic/026
generic/031
generic/033
generic/050
generic/052
generic/058
generic/059
generic/060
generic/061
generic/063
generic/064
generic/078
generic/079
generic/081
generic/082
generic/091
generic/094
generic/096
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/118
generic/119
generic/121
generic/122
generic/123
generic/128
generic/130
generic/134
generic/135
generic/136
generic/138
generic/139
generic/140
generic/142
generic/143
generic/144
generic/145
generic/146
generic/147
generic/148
generic/149
generic/150
generic/151
|
||||
generic/152
|
||||
generic/153
|
||||
generic/154
|
||||
generic/155
|
||||
generic/156
|
||||
generic/157
|
||||
generic/158
|
||||
generic/159
|
||||
generic/160
|
||||
generic/161
|
||||
generic/162
|
||||
generic/163
|
||||
generic/171
|
||||
generic/172
|
||||
generic/173
|
||||
generic/174
|
||||
generic/177
|
||||
generic/178
|
||||
generic/179
|
||||
generic/180
|
||||
generic/181
|
||||
generic/182
|
||||
generic/183
|
||||
generic/185
|
||||
generic/188
|
||||
generic/189
|
||||
generic/190
|
||||
generic/191
|
||||
generic/193
|
||||
generic/194
|
||||
generic/195
|
||||
generic/196
|
||||
generic/197
|
||||
generic/198
|
||||
generic/199
|
||||
generic/200
|
||||
generic/201
|
||||
generic/202
|
||||
generic/203
|
||||
generic/205
|
||||
generic/206
|
||||
generic/207
|
||||
generic/210
|
||||
generic/211
|
||||
generic/212
|
||||
generic/214
|
||||
generic/216
|
||||
generic/217
|
||||
generic/218
|
||||
generic/219
|
||||
generic/220
|
||||
generic/222
|
||||
generic/223
|
||||
generic/225
|
||||
generic/227
|
||||
generic/229
|
||||
generic/230
|
||||
generic/235
|
||||
generic/238
|
||||
generic/240
|
||||
generic/244
|
||||
generic/250
|
||||
generic/252
|
||||
generic/253
|
||||
generic/254
|
||||
generic/255
|
||||
generic/256
|
||||
generic/259
|
||||
generic/260
|
||||
generic/261
|
||||
generic/262
|
||||
generic/263
|
||||
generic/264
|
||||
generic/265
|
||||
generic/266
|
||||
generic/267
|
||||
generic/268
|
||||
generic/271
|
||||
generic/272
|
||||
generic/276
|
||||
generic/277
|
||||
generic/278
|
||||
generic/279
|
||||
generic/281
|
||||
generic/282
|
||||
generic/283
|
||||
generic/284
|
||||
generic/287
|
||||
generic/288
|
||||
generic/289
|
||||
generic/290
|
||||
generic/291
|
||||
generic/292
|
||||
generic/293
|
||||
generic/295
|
||||
generic/296
|
||||
generic/301
|
||||
generic/302
|
||||
generic/303
|
||||
generic/304
|
||||
generic/305
|
||||
generic/312
|
||||
generic/314
|
||||
generic/316
|
||||
generic/317
|
||||
generic/324
|
||||
generic/326
|
||||
generic/327
|
||||
generic/328
|
||||
generic/329
|
||||
generic/330
|
||||
generic/331
|
||||
generic/332
|
||||
generic/353
|
||||
generic/355
|
||||
generic/358
|
||||
generic/359
|
||||
generic/361
|
||||
generic/362
|
||||
generic/363
|
||||
generic/364
|
||||
generic/365
|
||||
generic/366
|
||||
generic/367
|
||||
generic/368
|
||||
generic/369
|
||||
generic/370
|
||||
generic/371
|
||||
generic/372
|
||||
generic/373
|
||||
generic/374
|
||||
generic/378
|
||||
generic/379
|
||||
generic/380
|
||||
generic/381
|
||||
generic/382
|
||||
generic/383
|
||||
generic/384
|
||||
generic/385
|
||||
generic/386
|
||||
generic/391
|
||||
generic/392
|
||||
generic/395
|
||||
generic/396
|
||||
generic/397
|
||||
generic/398
|
||||
generic/400
|
||||
generic/402
|
||||
generic/404
|
||||
generic/406
|
||||
generic/407
|
||||
generic/408
|
||||
generic/412
|
||||
generic/413
|
||||
generic/414
|
||||
generic/417
|
||||
generic/419
|
||||
generic/420
|
||||
generic/421
|
||||
generic/422
|
||||
generic/424
|
||||
generic/425
|
||||
generic/427
|
||||
generic/439
|
||||
generic/440
|
||||
generic/446
|
||||
generic/449
|
||||
generic/450
|
||||
generic/451
|
||||
generic/453
|
||||
generic/454
|
||||
generic/456
|
||||
generic/458
|
||||
generic/462
|
||||
generic/463
|
||||
generic/465
|
||||
generic/466
|
||||
generic/468
|
||||
generic/469
|
||||
generic/470
|
||||
generic/471
|
||||
generic/474
|
||||
generic/485
|
||||
generic/487
|
||||
generic/488
|
||||
generic/491
|
||||
generic/492
|
||||
generic/499
|
||||
generic/501
|
||||
generic/503
|
||||
generic/505
|
||||
generic/506
|
||||
generic/507
|
||||
generic/508
|
||||
generic/511
|
||||
generic/513
|
||||
generic/514
|
||||
generic/515
|
||||
generic/516
|
||||
generic/517
|
||||
generic/518
|
||||
generic/519
|
||||
generic/520
|
||||
generic/528
|
||||
generic/530
|
||||
generic/536
|
||||
generic/537
|
||||
generic/538
|
||||
generic/539
|
||||
generic/540
|
||||
generic/541
|
||||
generic/542
|
||||
generic/543
|
||||
generic/544
|
||||
generic/545
|
||||
generic/546
|
||||
generic/548
|
||||
generic/549
|
||||
generic/550
|
||||
generic/552
|
||||
generic/553
|
||||
generic/555
|
||||
generic/556
|
||||
generic/566
|
||||
generic/567
|
||||
generic/572
|
||||
generic/573
|
||||
generic/574
|
||||
generic/575
|
||||
generic/576
|
||||
generic/577
|
||||
generic/578
|
||||
generic/580
|
||||
generic/581
|
||||
generic/582
|
||||
generic/583
|
||||
generic/584
|
||||
generic/586
|
||||
generic/587
|
||||
generic/588
|
||||
generic/591
|
||||
generic/592
|
||||
generic/593
|
||||
generic/594
|
||||
generic/595
|
||||
generic/596
|
||||
generic/597
|
||||
generic/598
|
||||
generic/599
|
||||
generic/600
|
||||
generic/601
|
||||
generic/602
|
||||
generic/603
|
||||
generic/605
|
||||
generic/606
|
||||
generic/607
|
||||
generic/608
|
||||
generic/609
|
||||
generic/610
|
||||
generic/612
|
||||
generic/613
|
||||
generic/621
|
||||
generic/623
|
||||
generic/624
|
||||
generic/625
|
||||
generic/626
|
||||
generic/628
|
||||
generic/629
|
||||
generic/630
|
||||
generic/635
|
||||
generic/644
|
||||
generic/645
|
||||
generic/646
|
||||
generic/647
|
||||
generic/651
|
||||
generic/652
|
||||
generic/653
|
||||
generic/654
|
||||
generic/655
|
||||
generic/657
|
||||
generic/658
|
||||
generic/659
|
||||
generic/660
|
||||
generic/661
|
||||
generic/662
|
||||
generic/663
|
||||
generic/664
|
||||
generic/665
|
||||
generic/666
|
||||
generic/667
|
||||
generic/668
|
||||
generic/669
|
||||
generic/673
|
||||
generic/674
|
||||
generic/675
|
||||
generic/677
|
||||
generic/678
|
||||
generic/679
|
||||
generic/680
|
||||
generic/681
|
||||
generic/682
|
||||
generic/683
|
||||
generic/684
|
||||
generic/685
|
||||
generic/686
|
||||
generic/687
|
||||
generic/688
|
||||
generic/689
|
||||
shared/002
|
||||
shared/032
|
||||
Passed all 512 tests
|
||||

tests/extra/xfstests/local.exclude (new file, 44 lines)
@@ -0,0 +1,44 @@
generic/003 # missing atime update in buffered read
generic/075 # file content mismatch failures (fds, etc)
generic/103 # enospc causes trans commit failures
generic/108 # mount fails on failing device?
generic/112 # file content mismatch failures (fds, etc)
generic/213 # enospc causes trans commit failures
generic/318 # can't support user namespaces until v5.11
generic/321 # requires selinux enabled for '+' in ls?
generic/338 # BUG_ON update inode error handling
generic/347 # _dmthin_mount doesn't work?
generic/356 # swap
generic/357 # swap
generic/409 # bind mounts not scripted yet
generic/410 # bind mounts not scripted yet
generic/411 # bind mounts not scripted yet
generic/423 # symlink inode size is strlen() + 1 on scoutfs
generic/430 # xfs_io copy_range missing in el7
generic/431 # xfs_io copy_range missing in el7
generic/432 # xfs_io copy_range missing in el7
generic/433 # xfs_io copy_range missing in el7
generic/434 # xfs_io copy_range missing in el7
generic/441 # dm-mapper
generic/444 # el9's posix_acl_update_mode is buggy ?
generic/467 # open_by_handle ESTALE
generic/472 # swap
generic/484 # dm-mapper
generic/493 # swap
generic/494 # swap
generic/495 # swap
generic/496 # swap
generic/497 # swap
generic/532 # xfs_io statx attrib_mask missing in el7
generic/554 # swap
generic/563 # cgroup+loopdev
generic/564 # xfs_io copy_range missing in el7
generic/565 # xfs_io copy_range missing in el7
generic/568 # falloc not resulting in block count increase
generic/569 # swap
generic/570 # swap
generic/620 # dm-hugedisk
generic/633 # id-mapped mounts missing in el7
generic/636 # swap
generic/641 # swap
generic/643 # swap
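
As a side note, xfstests' check script can consume an exclude list like this via its -E option; a minimal sketch of wiring it up, where the checkout path and group name are assumptions rather than this harness's real configuration:

    # hypothetical invocation from an xfstests checkout
    cd /var/lib/xfstests
    ./check -g auto -E /path/to/tests/extra/xfstests/local.exclude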

@@ -7,8 +7,9 @@ t_status_msg()
export T_PASS_STATUS=100
export T_SKIP_STATUS=101
export T_FAIL_STATUS=102
export T_SKIP_PERMITTED_STATUS=103
export T_FIRST_STATUS="$T_PASS_STATUS"
export T_LAST_STATUS="$T_FAIL_STATUS"
export T_LAST_STATUS="$T_SKIP_PERMITTED_STATUS"

t_pass()
{
@@ -21,6 +22,17 @@ t_skip()
        exit $T_SKIP_STATUS
}

#
# This exit code is *reserved* for tests that are up-front never going to work
# in certain cases. This should be expressly documented per-case and made
# abundantly clear before merging. The test itself should document its case.
#
t_skip_permitted()
{
        t_status_msg "$@"
        exit $T_SKIP_PERMITTED_STATUS
}

t_fail()
{
        t_status_msg "$@"
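
A hedged sketch of how a test might use the new exit path; the kernel check is made up, but it mirrors the el7 cases called out in local.exclude above:

    # hypothetical test prologue: this case can never work on el7 kernels
    if [[ "$(uname -r)" == *el7* ]]; then
            t_skip_permitted "copy_range is missing in el7"
    fi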

@@ -52,19 +64,37 @@ t_rc()
}

#
# redirect test output back to the output of the invoking script instead
# of the compared output.
# As run, stdout/err are redirected to a file that will be compared with
# the stored expected golden output of the test. This redirects
# stdout/err in the script to stdout of the invoking run-tests. It's
# intended to give visible output of tests without being included in the
# golden output.
#
t_restore_output()
# (see the goofy "exec" fd manipulation in the main run-tests as it runs
# each test)
#
t_stdout_invoked()
{
        exec >&6 2>&1
}

#
# redirect a command's output back to the compared output after the
# test has restored its output
# This undoes t_stdout_invoked, returning the test's stdout/err to the
# output file as it was when it was launched.
#
t_compare_output()
t_stdout_compare()
{
        "$@" >&7 2>&1
        exec >&7 2>&1
}

#
# usually bash prints an annoying output message when jobs
# are killed.  We can avoid that by redirecting stderr for
# the bash process when it reaps the jobs that are killed.
#
t_silent_kill() {
        exec {ERR}>&2 2>/dev/null
        kill "$@"
        wait "$@"
        exec 2>&$ERR {ERR}>&-
}

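To make the fd 6/7 comments concrete, here is a minimal sketch of the plumbing run-tests presumably sets up before launching each test; the exact paths and variable names are assumptions:

    exec 6>&1                          # fd 6: run-tests' own stdout
    exec 7> "$T_RESULTS/output/$t"     # fd 7: the output compared against golden
    bash "tests/$t" >&7 2>&1           # tests start writing to the compared file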

@@ -140,13 +140,32 @@ t_filter_dmesg()
        re="$re|scoutfs .* error.*server failed to bind to.*"
        re="$re|scoutfs .* critical transaction commit failure.*"

        # ENOLINK (-67) indicates an expected forced unmount error
        re="$re|scoutfs .* error -67 .*"

        # change-devices causes loop device resizing
        re="$re|loop: module loaded"
        re="$re|loop[0-9].* detected capacity change from.*"
        re="$re|dm-[0-9].* detected capacity change from.*"

        # ignore systemd-journal rotating
        re="$re|systemd-journald.*"

        # process accounting can be noisy
        re="$re|Process accounting resumed.*"

        # format vers back/compat tries bad mounts
        re="$re|scoutfs .* error.*outside of supported version.*"
        re="$re|scoutfs .* error.*could not get .*super.*"

        # ignore "unsafe core pattern" when xfstests tries to disable cores
        re="$re|Unsafe core_pattern used with fs.suid_dumpable=2.*"
        re="$re|Pipe handler or fully qualified core dump path required.*"
        re="$re|Set kernel.core_pattern before fs.suid_dumpable.*"

        # perf warning that it adjusted sample rate
        re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"

        egrep -v "($re)" | \
                ignore_harmless_unwind_kasan_stack_oob
}

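The filter is a plain pipeline stage; a hedged example of per-test use (the capture step is an assumption, though the dmesg.new path also appears in the TAP helpers below):

    dmesg | t_filter_dmesg > "$T_RESULTS/tmp/$t/dmesg.new"
    test -s "$T_RESULTS/tmp/$t/dmesg.new" && echo "unexpected dmesg output"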

@@ -29,13 +29,12 @@ t_mount_rid()
}

#
# Output the "f.$fsid.r.$rid" identifier string for the given mount
# number, 0 is used by default if none is specified.
# Output the "f.$fsid.r.$rid" identifier string for the given path
# in a mounted scoutfs volume.
#
t_ident()
t_ident_from_mnt()
{
        local nr="${1:-0}"
        local mnt="$(eval echo \$T_M$nr)"
        local mnt="$1"
        local fsid
        local rid

@@ -45,6 +44,38 @@ t_ident()
        echo "f.${fsid:0:6}.r.${rid:0:6}"
}

#
# Output the "f.$fsid.r.$rid" identifier string for the given mount
# number, 0 is used by default if none is specified.
#
t_ident()
{
        local nr="${1:-0}"
        local mnt="$(eval echo \$T_M$nr)"

        t_ident_from_mnt "$mnt"
}

#
# Output the sysfs path for the given identifier.
#
t_sysfs_path_from_ident()
{
        local ident="$1"

        echo "/sys/fs/scoutfs/$ident"
}

#
# Output the sysfs path for a path in a mounted fs.
#
t_sysfs_path_from_mnt()
{
        local mnt="$1"

        t_sysfs_path_from_ident $(t_ident_from_mnt $mnt)
}

#
# Output the mount's sysfs path, defaulting to mount 0 if none is
# specified.
@@ -53,7 +84,7 @@ t_sysfs_path()
{
        local nr="$1"

        echo "/sys/fs/scoutfs/$(t_ident $nr)"
        t_sysfs_path_from_ident $(t_ident $nr)
}

#
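
A hedged example of the refactored helpers in use; the mount point is made up:

    # inspect the sysfs dir for whatever scoutfs volume is mounted at /mnt/test
    sysfs="$(t_sysfs_path_from_mnt /mnt/test)"   # /sys/fs/scoutfs/f.xxxxxx.r.xxxxxx
    ls "$sysfs"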

tests/funcs/tap.sh (new file, 88 lines)
@@ -0,0 +1,88 @@

#
# Generate TAP format test results
#

t_tap_header()
{
        local runid=$1
        local sequence=( $(echo $tests) )
        local count=${#sequence[@]}

        # avoid recreating the same TAP result over again - harness sets this
        [[ -z "$runid" ]] && runid="*test*"

        cat > $T_RESULTS/scoutfs.tap <<TAPEOF
TAP version 14
1..${count}
#
# TAP results for run ${runid}
#
# host/run info:
#
# hostname: ${HOSTNAME}
# test start time: $(date --utc)
# uname -r: $(uname -r)
# scoutfs commit id: $(git describe --tags)
#
# sequence for this run:
#
TAPEOF

        # Sequence
        for t in ${tests}; do
                echo ${t/.sh/}
        done | cat -n | expand | column -c 120 | expand | sed 's/^ /#/' >> $T_RESULTS/scoutfs.tap
        echo "#" >> $T_RESULTS/scoutfs.tap
}

t_tap_progress()
{
        (
        local i=$(( testcount + 1 ))
        local testname=$1
        local result=$2

        local diff=""
        local dmsg=""

        if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
                dmsg="1"
        fi

        if ! cmp -s golden/${testname} $T_RESULTS/output/${testname}; then
                diff="1"
        fi

        if [[ "${result}" == "100" ]] && [[ -z "${dmsg}" ]] && [[ -z "${diff}" ]]; then
                echo "ok ${i} - ${testname}"
        elif [[ "${result}" == "103" ]]; then
                echo "ok ${i} - ${testname}"
                echo "# ${testname} ** skipped - permitted **"
        else
                echo "not ok ${i} - ${testname}"
                case ${result} in
                101)
                        echo "# ${testname} ** skipped **"
                        ;;
                102)
                        echo "# ${testname} ** failed **"
                        ;;
                esac

                if [[ -n "${diff}" ]]; then
                        echo "#"
                        echo "# diff:"
                        echo "#"
                        diff -u golden/${testname} $T_RESULTS/output/${testname} | expand | sed 's/^/# /'
                fi

                if [[ -n "${dmsg}" ]]; then
                        echo "#"
                        echo "# dmesg:"
                        echo "#"
                        cat "$T_RESULTS/tmp/${testname}/dmesg.new" | sed 's/^/# /'
                fi
        fi
        ) >> $T_RESULTS/scoutfs.tap
}
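
For reference, a short sample of the stream these helpers emit, with made-up test names, one permitted skip, and one failure:

    TAP version 14
    1..3
    ok 1 - basic-posix-acl
    ok 2 - retention-basic
    # retention-basic ** skipped - permitted **
    not ok 3 - quota
    # quota ** failed **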

tests/golden/basic-posix-acl (new file, 155 lines)
@@ -0,0 +1,155 @@
== setup test directory
== getfacl
directory drwxr-xr-x 0 0 0 '.'
# file: .
# owner: root
# group: root
user::rwx
group::r-x
other::r-x

== basic non-acl access through permissions
directory drwxr-xr-x 0 44444 0 'dir-testuid'
touch: cannot touch 'dir-testuid/file-group-write': Permission denied
touch: cannot touch 'symlinkdir-testuid/symlink-file-group-write': Permission denied
regular empty file -rw-r--r-- 22222 44444 0 'dir-testuid/file-group-write'
regular empty file -rw-r--r-- 22222 44444 0 'symlinkdir-testuid/symlink-file-group-write'
== basic acl access
directory drwxr-xr-x 0 0 0 'dir-root'
touch: cannot touch 'dir-root/file-group-write': Permission denied
touch: cannot touch 'symlinkdir-root/file-group-write': Permission denied
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x

regular empty file -rw-r--r-- 22222 0 0 'dir-root/file-group-write'
regular empty file -rw-r--r-- 22222 0 0 'symlinkdir-root/file-group-write'
== directory exec
Success
Success
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rw-
group::r-x
mask::rwx
other::r-x

Failed
Failed
# file: dir-root
# owner: root
# group: root
user::rwx
user:22222:rw-
group::r-x
group:44444:rwx
mask::rwx
other::r-x

Success
Success
== get/set attr
regular empty file -rw-r--r-- 0 0 0 'file-root'
setfattr: file-root: Permission denied
# file: file-root
# owner: root
# group: root
user::rw-
user:22222:rw-
group::r--
mask::rw-
other::r--

# file: file-root
user.test2="Success"

# file: file-root
# owner: root
# group: root
user::rw-
group::r--
mask::r--
other::r--

setfattr: file-root: Permission denied
# file: file-root
user.test2="Success"

# file: file-root
# owner: root
# group: root
user::rw-
group::r--
group:44444:rw-
mask::rw-
other::r--

# file: file-root
user.test2="Success"
user.test4="Success"

== inheritance / default acl
directory drwxr-xr-x 0 0 0 'dir-root2'
mkdir: cannot create directory 'dir-root2/dir': Permission denied
touch: cannot touch 'dir-root2/dir/file': No such file or directory
# file: dir-root2
# owner: root
# group: root
user::rwx
group::r-x
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

mkdir: cannot create directory 'dir-root2/dir': Permission denied
touch: cannot touch 'dir-root2/dir/file': No such file or directory
# file: dir-root2
# owner: root
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

directory drwxrwxr-x 22222 0 4 'dir-root2/dir'
# file: dir-root2/dir
# owner: 22222
# group: root
user::rwx
user:22222:rwx
group::r-x
mask::rwx
other::r-x
default:user::rwx
default:user:22222:rwx
default:group::r-x
default:mask::rwx
default:other::r-x

regular empty file -rw-rw-r-- 22222 0 0 'dir-root2/dir/file'
# file: dir-root2/dir/file
# owner: 22222
# group: root
user::rw-
user:22222:rwx #effective:rw-
group::r-x #effective:r--
mask::rw-
other::r--

== cleanup

@@ -56,3 +56,4 @@ mv: cannot move '/mnt/test/test/basic-posix-consistency/dir/c/clobber' to '/mnt/
== inode indexes match after removing and syncing
== concurrent creates make one file
one-file
== cleanup

@@ -25,3 +25,4 @@ rc: 0
equal_prepared
large_prepared
resized larger test rc: 0
== cleanup

@@ -1,29 +1,29 @@
== initial writes smaller than prealloc grow to prealloc size
/mnt/test/test/data-prealloc/file-1: 7 extents found
/mnt/test/test/data-prealloc/file-2: 7 extents found
/mnt/test/test/data-prealloc/file-1: extents: 7
/mnt/test/test/data-prealloc/file-2: extents: 7
== larger files get full prealloc extents
/mnt/test/test/data-prealloc/file-1: 9 extents found
/mnt/test/test/data-prealloc/file-2: 9 extents found
/mnt/test/test/data-prealloc/file-1: extents: 9
/mnt/test/test/data-prealloc/file-2: extents: 9
== non-streaming writes with contig have per-block extents
/mnt/test/test/data-prealloc/file-1: 32 extents found
/mnt/test/test/data-prealloc/file-2: 32 extents found
/mnt/test/test/data-prealloc/file-1: extents: 32
/mnt/test/test/data-prealloc/file-2: extents: 32
== any writes to region prealloc get full extents
/mnt/test/test/data-prealloc/file-1: 4 extents found
/mnt/test/test/data-prealloc/file-2: 4 extents found
/mnt/test/test/data-prealloc/file-1: 4 extents found
/mnt/test/test/data-prealloc/file-2: 4 extents found
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
== streaming offline writes get full extents either way
/mnt/test/test/data-prealloc/file-1: 4 extents found
/mnt/test/test/data-prealloc/file-2: 4 extents found
/mnt/test/test/data-prealloc/file-1: 4 extents found
/mnt/test/test/data-prealloc/file-2: 4 extents found
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
/mnt/test/test/data-prealloc/file-1: extents: 4
/mnt/test/test/data-prealloc/file-2: extents: 4
== goofy preallocation amounts work
/mnt/test/test/data-prealloc/file-1: 5 extents found
/mnt/test/test/data-prealloc/file-2: 5 extents found
/mnt/test/test/data-prealloc/file-1: 5 extents found
/mnt/test/test/data-prealloc/file-2: 5 extents found
/mnt/test/test/data-prealloc/file-1: 3 extents found
/mnt/test/test/data-prealloc/file-2: 3 extents found
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 6
/mnt/test/test/data-prealloc/file-2: extents: 6
/mnt/test/test/data-prealloc/file-1: extents: 3
/mnt/test/test/data-prealloc/file-2: extents: 3
== block writes into region allocs hole
wrote blk 24
wrote blk 32

tests/golden/format-version-forward-back (new file, 4 lines)
@@ -0,0 +1,4 @@
== ensuring utils and module for old versions
== unmounting test fs and removing test module
== testing combinations of old and new format versions
== restoring test module and mount
@@ -1,4 +1,3 @@
== setting longer hung task timeout
== creating fragmented extents
== unlink file with moved extents to free extents per block
== cleanup

tests/golden/lock-shrink-read-race (new file, 2 lines)
@@ -0,0 +1,2 @@
=== setup
=== spin reading and shrinking

tests/golden/mmap (new file, 27 lines)
@@ -0,0 +1,27 @@
== mmap_stress
thread 0 complete
thread 1 complete
thread 2 complete
thread 3 complete
thread 4 complete
== basic mmap/read/write consistency checks
== mmap read from offline extent
0: offset: 0 length: 2 flags: O.L
extents: 1
1
00000200: ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea ................
0
0: offset: 0 length: 2 flags: ..L
extents: 1
== mmap write to an offline extent
0: offset: 0 length: 2 flags: O.L
extents: 1
1
0
0: offset: 0 length: 2 flags: ..L
extents: 1
00000000 ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea |................|
00000010 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 |................|
00000020 ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea ea |................|
00000030
== done

@@ -49,7 +49,7 @@ offline wating should be empty:
0
== truncating does wait
truncate should be waiting for first block:
trunate should no longer be waiting:
truncate should no longer be waiting:
0
== writing waits
should be waiting for write

tests/golden/projects (new file, 24 lines)
@@ -0,0 +1,24 @@
== default new files don't have project
0
== set new project on files and dirs
8675309
8675309
== non-root can see id
8675309
== can use IDs around long width limits
2147483647
2147483648
4294967295
9223372036854775807
9223372036854775808
18446744073709551615
== created files and dirs inherit project id
8675309
8675309
== inheritance continues
8675309
== clearing project id stops inheritance
0
0
== o_tmpfile creations inherit dir
8675309

tests/golden/quota (new file, 41 lines)
@@ -0,0 +1,41 @@
== prepare dir with write perm for test ids
== test assumes starting with no rules, empty list
== add rule
7 13,L,- 15,L,- 17,L,- I 33 -
== list is empty again after delete
== can change limits without deleting
1 1,L,- 1,L,- 1,L,- I 100 -
1 1,L,- 1,L,- 1,L,- I 101 -
1 1,L,- 1,L,- 1,L,- I 99 -
== wipe and restore rules in bulk
7 15,L,- 0,L,- 0,L,- I 33 -
7 14,L,- 0,L,- 0,L,- I 33 -
7 13,L,- 0,L,- 0,L,- I 33 -
7 12,L,- 0,L,- 0,L,- I 33 -
7 11,L,- 0,L,- 0,L,- I 33 -
7 10,L,- 0,L,- 0,L,- I 33 -
7 15,L,- 0,L,- 0,L,- I 33 -
7 14,L,- 0,L,- 0,L,- I 33 -
7 13,L,- 0,L,- 0,L,- I 33 -
7 12,L,- 0,L,- 0,L,- I 33 -
7 11,L,- 0,L,- 0,L,- I 33 -
7 10,L,- 0,L,- 0,L,- I 33 -
== default rule prevents file creation
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
== decreasing totl allows file creation again
== attr selecting rules prevent creation
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
== multi attr selecting doesn't prevent partial
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
== op differentiates
== higher priority rule applies
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
== data rules with total and count prevent write and fallocate
dd: error writing '/mnt/test/test/quota/dir/file': Disk quota exceeded
fallocate: fallocate failed: Disk quota exceeded
dd: error writing '/mnt/test/test/quota/dir/file': Disk quota exceeded
fallocate: fallocate failed: Disk quota exceeded
== added rules work after bulk restore
touch: cannot touch '/mnt/test/test/quota/dir/file': Disk quota exceeded
== cleanup

tests/golden/retention-basic (new file, 28 lines)
@@ -0,0 +1,28 @@
== setting retention on dir fails
attr_x ioctl failed on '/mnt/test/test/retention-basic': Invalid argument (22)
scoutfs: set-attr-x failed: Invalid argument (22)
== set retention
== get-attr-x shows retention
1
== unpriv can't clear retention
attr_x ioctl failed on '/mnt/test/test/retention-basic/file-1': Operation not permitted (1)
scoutfs: set-attr-x failed: Operation not permitted (1)
== can set hidden scoutfs xattr in retention
== setting user. xattr fails in retention
setfattr: /mnt/test/test/retention-basic/file-1: Operation not permitted
== file deletion fails in retention
rm: cannot remove '/mnt/test/test/retention-basic/file-1': Operation not permitted
== file rename fails in retention
mv: cannot move '/mnt/test/test/retention-basic/file-1' to '/mnt/test/test/retention-basic/file-2': Operation not permitted
== file write fails in retention
date: write error: Operation not permitted
== file truncate fails in retention
truncate: failed to truncate '/mnt/test/test/retention-basic/file-1' at 0 bytes: Operation not permitted
== setattr fails in retention
touch: setting times of '/mnt/test/test/retention-basic/file-1': Operation not permitted
== clear retention
== file write
== file rename
== setattr
== xattr deletion
== cleanup

@@ -22,10 +22,8 @@ scoutfs: setattr failed: Invalid argument (22)
== large ctime is set
1972-02-19 00:06:25.999999999 +0000
== large offline extents are created
Filesystem type is: 554f4353
File size of /mnt/test/test/setattr_more/file is 40988672 (10007 blocks of 4096 bytes)
ext: logical_offset: physical_offset: length: expected: flags:
0: 0.. 10006: 0.. 10006: 10007: unknown,eof
/mnt/test/test/setattr_more/file: 1 extent found
0: offset: 0 0 length: 10007 flags: O.L
extents: 1
== correct offline extent length
976563
== omitting data_version should not fail

tests/golden/simple-readdir (new file, 97 lines)
@@ -0,0 +1,97 @@
== create content
== readdir all
00000000: d_off: 0x00000001 d_reclen: 0x18 d_type: DT_DIR d_name: .
00000001: d_off: 0x00000002 d_reclen: 0x18 d_type: DT_DIR d_name: ..
00000002: d_off: 0x00000003 d_reclen: 0x18 d_type: DT_REG d_name: a
00000003: d_off: 0x00000004 d_reclen: 0x20 d_type: DT_REG d_name: aaaaaaaa
00000004: d_off: 0x00000005 d_reclen: 0x28 d_type: DT_REG d_name: aaaaaaaaaaaaaaa
00000005: d_off: 0x00000006 d_reclen: 0x30 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaa
00000006: d_off: 0x00000007 d_reclen: 0x38 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000007: d_off: 0x00000008 d_reclen: 0x38 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000008: d_off: 0x00000009 d_reclen: 0x40 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000009: d_off: 0x0000000a d_reclen: 0x48 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000a: d_off: 0x0000000b d_reclen: 0x50 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000b: d_off: 0x0000000c d_reclen: 0x58 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000c: d_off: 0x0000000d d_reclen: 0x60 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000d: d_off: 0x0000000e d_reclen: 0x68 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000e: d_off: 0x0000000f d_reclen: 0x70 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000000f: d_off: 0x00000010 d_reclen: 0x70 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000010: d_off: 0x00000011 d_reclen: 0x78 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000011: d_off: 0x00000012 d_reclen: 0x80 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000012: d_off: 0x00000013 d_reclen: 0x88 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000013: d_off: 0x00000014 d_reclen: 0x90 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000014: d_off: 0x00000015 d_reclen: 0x98 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000015: d_off: 0x00000016 d_reclen: 0xa0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000016: d_off: 0x00000017 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000017: d_off: 0x00000018 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000018: d_off: 0x00000019 d_reclen: 0xb0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000019: d_off: 0x0000001a d_reclen: 0xb8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001a: d_off: 0x0000001b d_reclen: 0xc0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001b: d_off: 0x0000001c d_reclen: 0xc8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001c: d_off: 0x0000001d d_reclen: 0xd0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001d: d_off: 0x0000001e d_reclen: 0xd8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001e: d_off: 0x0000001f d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001f: d_off: 0x00000020 d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000020: d_off: 0x00000021 d_reclen: 0xe8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000021: d_off: 0x00000022 d_reclen: 0xf0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000022: d_off: 0x00000023 d_reclen: 0xf8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000023: d_off: 0x00000024 d_reclen: 0x100 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000024: d_off: 0x00000025 d_reclen: 0x108 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000025: d_off: 0x00000026 d_reclen: 0x110 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
== readdir offset
00000014: d_off: 0x00000015 d_reclen: 0x98 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000015: d_off: 0x00000016 d_reclen: 0xa0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000016: d_off: 0x00000017 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000017: d_off: 0x00000018 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000018: d_off: 0x00000019 d_reclen: 0xb0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000019: d_off: 0x0000001a d_reclen: 0xb8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001a: d_off: 0x0000001b d_reclen: 0xc0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001b: d_off: 0x0000001c d_reclen: 0xc8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001c: d_off: 0x0000001d d_reclen: 0xd0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001d: d_off: 0x0000001e d_reclen: 0xd8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001e: d_off: 0x0000001f d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001f: d_off: 0x00000020 d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000020: d_off: 0x00000021 d_reclen: 0xe8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000021: d_off: 0x00000022 d_reclen: 0xf0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000022: d_off: 0x00000023 d_reclen: 0xf8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000023: d_off: 0x00000024 d_reclen: 0x100 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000024: d_off: 0x00000025 d_reclen: 0x108 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000025: d_off: 0x00000026 d_reclen: 0x110 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
== readdir len (bytes)
00000000: d_off: 0x00000001 d_reclen: 0x18 d_type: DT_DIR d_name: .
00000001: d_off: 0x00000002 d_reclen: 0x18 d_type: DT_DIR d_name: ..
00000002: d_off: 0x00000003 d_reclen: 0x18 d_type: DT_REG d_name: a
00000003: d_off: 0x00000004 d_reclen: 0x20 d_type: DT_REG d_name: aaaaaaaa
00000004: d_off: 0x00000005 d_reclen: 0x28 d_type: DT_REG d_name: aaaaaaaaaaaaaaa
00000005: d_off: 0x00000006 d_reclen: 0x30 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaa
00000006: d_off: 0x00000007 d_reclen: 0x38 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaa
== introduce gap
00000000: d_off: 0x00000001 d_reclen: 0x18 d_type: DT_DIR d_name: .
00000001: d_off: 0x00000002 d_reclen: 0x18 d_type: DT_DIR d_name: ..
00000002: d_off: 0x00000003 d_reclen: 0x18 d_type: DT_REG d_name: a
00000003: d_off: 0x00000004 d_reclen: 0x20 d_type: DT_REG d_name: aaaaaaaa
00000004: d_off: 0x00000005 d_reclen: 0x28 d_type: DT_REG d_name: aaaaaaaaaaaaaaa
00000005: d_off: 0x00000006 d_reclen: 0x30 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaa
00000006: d_off: 0x00000007 d_reclen: 0x38 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000007: d_off: 0x00000008 d_reclen: 0x38 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000008: d_off: 0x00000009 d_reclen: 0x40 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000009: d_off: 0x00000014 d_reclen: 0x48 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000014: d_off: 0x00000015 d_reclen: 0x98 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000015: d_off: 0x00000016 d_reclen: 0xa0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000016: d_off: 0x00000017 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000017: d_off: 0x00000018 d_reclen: 0xa8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000018: d_off: 0x00000019 d_reclen: 0xb0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000019: d_off: 0x0000001a d_reclen: 0xb8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001a: d_off: 0x0000001b d_reclen: 0xc0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001b: d_off: 0x0000001c d_reclen: 0xc8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001c: d_off: 0x0000001d d_reclen: 0xd0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001d: d_off: 0x0000001e d_reclen: 0xd8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001e: d_off: 0x0000001f d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0000001f: d_off: 0x00000020 d_reclen: 0xe0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000020: d_off: 0x00000021 d_reclen: 0xe8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000021: d_off: 0x00000022 d_reclen: 0xf0 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000022: d_off: 0x00000023 d_reclen: 0xf8 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000023: d_off: 0x00000024 d_reclen: 0x100 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000024: d_off: 0x00000025 d_reclen: 0x108 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
00000025: d_off: 0x00000026 d_reclen: 0x110 d_type: DT_REG d_name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
== cleanup

@@ -1,5 +1,9 @@
== create/release/stage single block file
0: offset: 0 0 length: 1 flags: O.L
extents: 1
== create/release/stage larger file
0: offset: 0 0 length: 4096 flags: O.L
extents: 1
== multiple release,drop_cache,stage cycles
== release+stage shouldn't change stat, data seq or vers
== stage does change meta_seq
@@ -7,16 +11,22 @@
stage: must provide file version with --data-version
Try `stage --help' or `stage --usage' for more information.
== wrapped region fails
stage returned -1, not 4096: error Invalid argument (22)
stage returned -1, not 8192: error Invalid argument (22)
scoutfs: stage failed: Input/output error (5)
== non-block aligned offset fails
stage returned -1, not 4095: error Invalid argument (22)
scoutfs: stage failed: Input/output error (5)
0: offset: 0 0 length: 1 flags: O.L
extents: 1
== non-block aligned len within block fails
stage returned -1, not 1024: error Invalid argument (22)
scoutfs: stage failed: Input/output error (5)
0: offset: 0 0 length: 1 flags: O.L
extents: 1
== partial final block that writes to i_size does work
== zero length stage doesn't bring blocks online
0: offset: 0 0 length: 100 flags: O.L
extents: 1
== stage of non-regular file fails
ioctl failed: Inappropriate ioctl for device (25)
stage: must provide file version with --data-version

@@ -1,2 +1,3 @@
== create initial files
== race stage and release
== cleanup

@@ -1,288 +0,0 @@
Ran:
generic/001
generic/002
generic/004
generic/005
generic/006
generic/007
generic/011
generic/013
generic/014
generic/020
generic/023
generic/024
generic/028
generic/032
generic/034
generic/035
generic/037
generic/039
generic/040
generic/041
generic/053
generic/056
generic/057
generic/062
generic/065
generic/066
generic/067
generic/069
generic/070
generic/071
generic/073
generic/076
generic/084
generic/086
generic/087
generic/088
generic/090
generic/092
generic/098
generic/101
generic/104
generic/105
generic/106
generic/107
generic/117
generic/124
generic/129
generic/131
generic/169
generic/184
generic/221
generic/228
generic/236
generic/237
generic/245
generic/249
generic/257
generic/258
generic/286
generic/294
generic/306
generic/307
generic/308
generic/309
generic/313
generic/315
generic/319
generic/322
generic/335
generic/336
generic/337
generic/341
generic/342
generic/343
generic/348
generic/360
generic/375
generic/376
generic/377
Not run:
generic/008
generic/009
generic/012
generic/015
generic/016
generic/018
generic/021
generic/022
generic/025
generic/026
generic/031
generic/033
generic/050
generic/052
generic/058
generic/059
generic/060
generic/061
generic/063
generic/064
generic/078
generic/079
generic/081
generic/082
generic/091
generic/094
generic/096
generic/110
generic/111
generic/113
generic/114
generic/115
generic/116
generic/118
generic/119
generic/121
generic/122
generic/123
generic/128
generic/130
generic/134
generic/135
generic/136
generic/138
generic/139
generic/140
generic/142
generic/143
generic/144
generic/145
generic/146
generic/147
generic/148
generic/149
generic/150
generic/151
generic/152
generic/153
generic/154
generic/155
generic/156
generic/157
generic/158
generic/159
generic/160
generic/161
generic/162
generic/163
generic/171
generic/172
generic/173
generic/174
generic/177
generic/178
generic/179
generic/180
generic/181
generic/182
generic/183
generic/185
generic/188
generic/189
generic/190
generic/191
generic/193
generic/194
generic/195
generic/196
generic/197
generic/198
generic/199
generic/200
generic/201
generic/202
generic/203
generic/205
generic/206
generic/207
generic/210
generic/211
generic/212
generic/214
generic/216
generic/217
generic/218
generic/219
generic/220
generic/222
generic/223
generic/225
generic/227
generic/229
generic/230
generic/235
generic/238
generic/240
generic/244
generic/250
generic/252
generic/253
generic/254
generic/255
generic/256
generic/259
generic/260
generic/261
generic/262
generic/263
generic/264
generic/265
generic/266
generic/267
generic/268
generic/271
generic/272
generic/276
generic/277
generic/278
generic/279
generic/281
generic/282
generic/283
generic/284
generic/287
generic/288
generic/289
generic/290
generic/291
generic/292
generic/293
generic/295
generic/296
generic/301
generic/302
generic/303
generic/304
generic/305
generic/312
generic/314
generic/316
generic/317
generic/324
generic/326
generic/327
generic/328
generic/329
generic/330
generic/331
generic/332
generic/353
generic/355
generic/356
generic/357
generic/358
generic/359
generic/361
generic/362
generic/363
generic/364
generic/365
generic/366
generic/367
generic/368
generic/369
generic/370
generic/371
generic/372
generic/373
generic/374
generic/378
generic/379
generic/380
generic/381
generic/382
generic/383
generic/384
generic/385
generic/386
shared/001
shared/002
shared/003
shared/004
shared/032
shared/051
shared/289
Passed all 79 tests

@@ -39,6 +39,20 @@ cmd() {
|
||||
die "cmd failed (check the run.log)"
|
||||
}
|
||||
|
||||
# record pids to kill as we exit; we kill in reverse of the order they were added
|
||||
declare -a atexit_kill_pids
|
||||
atexit_kill()
|
||||
{
|
||||
local pid
|
||||
|
||||
for pid in $(printf '%s\n' "${atexit_kill_pids[@]}" | tac); do
|
||||
if test -e "/proc/$pid/status" ; then
|
||||
kill "$pid"
|
||||
fi
|
||||
done
|
||||
}
|
||||
trap atexit_kill EXIT
|
||||
|
||||
show_help()
|
||||
{
|
||||
cat << EOF
|
||||
@@ -56,6 +70,7 @@ $(basename $0) options:
|
||||
| only tests matching will be run. Can be provided multiple
|
||||
| times
|
||||
-i | Force removing and inserting the built scoutfs.ko module.
|
||||
-l <nr> | Loop each test <nr> times while passing, last run counts.
|
||||
-M <file> | Specify the filesystem's metadata device path that contains
|
||||
| the file system to be tested. Will be clobbered by -m mkfs.
|
||||
-m | Run mkfs on the device before mounting and running
|
||||
@@ -69,10 +84,12 @@ $(basename $0) options:
|
||||
-r <dir> | Specify the directory in which to store results of
|
||||
| test runs. The directory will be created if it doesn't
|
||||
| exist. Previous results will be deleted as each test runs.
|
||||
-R | Shuffle the test order randomly using shuf.
|
||||
-s | Skip git repo checkouts.
|
||||
-t | Enable trace events that match the given glob argument.
|
||||
| Multiple options enable multiple globbed events.
|
||||
-T <nr> | Multiply the original trace buffer size by nr during the run.
|
||||
-V <nr> | Set mkfs device format version.
|
||||
-X | Specify the xfstests git repo dir. Used by tests/xfstests.sh.
|
||||
-x | xfstests git branch to check out and track.
|
||||
-y | Additional args passed to xfstests ./check.
|
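Taken together, the new flags compose with the existing ones. A hypothetical invocation (device and results paths are placeholders, and flags not shown in this hunk are omitted) might look like:

    ./run-tests.sh -m -M /dev/mapper/test-meta -r /tmp/scoutfs-results \
            -l 5 -R -V 2

Here -l 5 loops each passing test five times, -R shuffles the sequence, and -V 2 pins the mkfs format version.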
||||
@@ -88,6 +105,8 @@ done
|
||||
# set some T_ defaults
|
||||
T_TRACE_DUMP="0"
|
||||
T_TRACE_PRINTK="0"
|
||||
T_PORT_START="19700"
|
||||
T_LOOP_ITER="1"
|
||||
|
||||
# array declarations to be able to use array ops
|
||||
declare -a T_TRACE_GLOB
|
||||
@@ -128,6 +147,12 @@ while true; do
|
||||
-i)
|
||||
T_INSMOD="1"
|
||||
;;
|
||||
-l)
|
||||
test -n "$2" || die "-l must have a nr iterations argument"
|
||||
test "$2" -eq "$2" 2>/dev/null || die "-l <nr> argument must be an integer"
|
||||
T_LOOP_ITER="$2"
|
||||
shift
|
||||
;;
|
||||
-M)
|
||||
test -n "$2" || die "-z must have meta device file argument"
|
||||
T_META_DEVICE="$2"
|
||||
@@ -163,6 +188,9 @@ while true; do
|
||||
T_RESULTS="$2"
|
||||
shift
|
||||
;;
|
||||
-R)
|
||||
T_SHUF="1"
|
||||
;;
|
||||
-s)
|
||||
T_SKIP_CHECKOUT="1"
|
||||
;;
|
||||
@@ -176,6 +204,11 @@ while true; do
|
||||
T_TRACE_MULT="$2"
|
||||
shift
|
||||
;;
|
||||
-V)
|
||||
test -n "$2" || die "-V must have a format version argument"
|
||||
T_MKFS_FORMAT_VERSION="-V $2"
|
||||
shift
|
||||
;;
|
||||
-X)
|
||||
test -n "$2" || die "-X requires xfstests git repo dir argument"
|
||||
T_XFSTESTS_REPO="$2"
|
||||
@@ -255,13 +288,37 @@ for e in T_META_DEVICE T_DATA_DEVICE T_EX_META_DEV T_EX_DATA_DEV T_KMOD T_RESULT
|
||||
eval $e=\"$(readlink -f "${!e}")\"
|
||||
done
|
||||
|
||||
# check that our listening ports stay clear of the dynamic range when we can
|
||||
T_TEST_PORT="$T_PORT_START"
|
||||
T_SCRATCH_PORT="$((T_PORT_START + 100))"
|
||||
T_DEV_PORT="$((T_PORT_START + 200))"
|
||||
read local_start local_end < /proc/sys/net/ipv4/ip_local_port_range
|
||||
if [ -n "$local_start" -a -n "$local_end" -a "$local_start" -lt "$local_end" ]; then
|
||||
if [ ! "$T_DEV_PORT" -lt "$local_start" -a ! "$T_TEST_PORT" -gt "$local_end" ]; then
|
||||
die "listening port range $T_TEST_PORT - $T_DEV_PORT is within local dynamic port range $local_start - $local_end in /proc/sys/net/ipv4/ip_local_port_range"
|
||||
fi
|
||||
fi
|
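As a worked example of the overlap check above (numbers assumed, not taken from a real host): with the default T_PORT_START of 19700 the three listening ports are 19700, 19800, and 19900, and a stock dynamic range of 32768-60999 never triggers the die:

    read local_start local_end < /proc/sys/net/ipv4/ip_local_port_range
    # overlap exists only if 19900 >= local_start and 19700 <= local_end
    if [ 19900 -ge "$local_start" ] && [ 19700 -le "$local_end" ]; then
            echo "run-tests.sh would die here"
    fi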
||||
|
||||
# permute sequence?
|
||||
T_SEQUENCE=sequence
|
||||
if [ -n "$T_SHUF" ]; then
|
||||
msg "shuffling test order"
|
||||
shuf sequence -o sequence.shuf
|
||||
# keep xfstests at the end
|
||||
if grep -q 'xfstests.sh' sequence.shuf ; then
|
||||
sed -i '/xfstests.sh/d' sequence.shuf
|
||||
echo "xfstests.sh" >> sequence.shuf
|
||||
fi
|
||||
T_SEQUENCE=sequence.shuf
|
||||
fi
|
||||
|
||||
# include everything by default
|
||||
test -z "$T_INCLUDE" && T_INCLUDE="-e '.*'"
|
||||
# (quickly) exclude nothing by default
|
||||
test -z "$T_EXCLUDE" && T_EXCLUDE="-e '\Zx'"
|
||||
|
||||
# eval to strip the regex quoting ticks without expanding their contents
|
||||
tests=$(grep -v "^#" sequence |
|
||||
tests=$(grep -v "^#" $T_SEQUENCE |
|
||||
eval grep "$T_INCLUDE" | eval grep -v "$T_EXCLUDE")
|
||||
test -z "$tests" && \
|
||||
die "no tests found by including $T_INCLUDE and excluding $T_EXCLUDE"
|
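For a concrete sense of what the eval'ed pipeline expands to (the include pattern here is illustrative), running only the lock tests would reduce to:

    grep -v "^#" $T_SEQUENCE | grep -e 'lock-' | grep -v -e '\Zx'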
||||
@@ -340,11 +397,11 @@ fi
|
||||
quo=""
|
||||
if [ -n "$T_MKFS" ]; then
|
||||
for i in $(seq 0 $((T_QUORUM - 1))); do
|
||||
quo="$quo -Q $i,127.0.0.1,$((42000 + i))"
|
||||
quo="$quo -Q $i,127.0.0.1,$((T_TEST_PORT + i))"
|
||||
done
|
||||
|
||||
msg "making new filesystem with $T_QUORUM quorum members"
|
||||
cmd scoutfs mkfs -f $quo $T_DATA_ALLOC_ZONE_BLOCKS \
|
||||
cmd scoutfs mkfs -f $quo $T_DATA_ALLOC_ZONE_BLOCKS $T_MKFS_FORMAT_VERSION \
|
||||
"$T_META_DEVICE" "$T_DATA_DEVICE"
|
||||
fi
|
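For T_QUORUM=3 and T_TEST_PORT=19700, the loop above builds a command along these lines (device paths are placeholders; the optional zone-blocks and format-version arguments are omitted when unset):

    scoutfs mkfs -f -Q 0,127.0.0.1,19700 -Q 1,127.0.0.1,19701 \
            -Q 2,127.0.0.1,19702 /dev/mapper/test-meta /dev/mapper/test-data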
||||
|
||||
@@ -352,7 +409,8 @@ if [ -n "$T_INSMOD" ]; then
|
||||
msg "removing and reinserting scoutfs module"
|
||||
test -e /sys/module/scoutfs && cmd rmmod scoutfs
|
||||
cmd modprobe libcrc32c
|
||||
cmd insmod "$T_KMOD/src/scoutfs.ko"
|
||||
T_MODULE="$T_KMOD/src/scoutfs.ko"
|
||||
cmd insmod "$T_MODULE"
|
||||
fi
|
||||
|
||||
if [ -n "$T_TRACE_MULT" ]; then
|
||||
@@ -407,26 +465,44 @@ EOF
|
||||
export SCOUTFS_FENCED_CONFIG_FILE="$conf"
|
||||
T_FENCED_LOG="$T_RESULTS/fenced.log"
|
||||
|
||||
#
|
||||
# Run the agent in the background, log its output, and kill it if we
|
||||
# exit
|
||||
#
|
||||
fenced_log()
|
||||
{
|
||||
echo "[$(timestamp)] $*" >> "$T_FENCED_LOG"
|
||||
}
|
||||
fenced_pid=""
|
||||
kill_fenced()
|
||||
{
|
||||
if test -n "$fenced_pid" -a -d "/proc/$fenced_pid" ; then
|
||||
fenced_log "killing fenced pid $fenced_pid"
|
||||
kill "$fenced_pid"
|
||||
fi
|
||||
}
|
||||
trap kill_fenced EXIT
|
||||
$T_UTILS/fenced/scoutfs-fenced > "$T_FENCED_LOG" 2>&1 &
|
||||
fenced_pid=$!
|
||||
fenced_log "started fenced pid $fenced_pid in the background"
|
||||
atexit_kill_pids+=($fenced_pid)
|
||||
|
||||
#
|
||||
# some critical failures will cause fs operations to hang. We can watch
|
||||
# for evidence of them and, at the very least, crash the system.
|
||||
#
|
||||
crash_monitor()
|
||||
{
|
||||
local bad=0
|
||||
|
||||
while sleep 1; do
|
||||
if dmesg | grep -q "inserting extent.*overlaps existing"; then
|
||||
echo "run-tests monitor saw overlapping extent message"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if dmesg | grep -q "error indicated by fence action" ; then
|
||||
echo "run-tests monitor saw fence agent error message"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if [ ! -e "/proc/${fenced_pid}/status" ]; then
|
||||
echo "run-tests monitor didn't see fenced pid $fenced_pid /proc dir"
|
||||
bad=1
|
||||
fi
|
||||
|
||||
if [ "$bad" != 0 ]; then
|
||||
echo "run-tests monitor triggering crash"
|
||||
echo c > /proc/sysrq-trigger
|
||||
# a backgrounded function doesn't re-exec bash, so $$ is the parent run-tests.sh
|
||||
kill -9 $$
|
||||
fi
|
||||
done
|
||||
}
|
||||
crash_monitor &
|
||||
atexit_kill_pids+=($!)
|
||||
|
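Note that the forced `echo c > /proc/sysrq-trigger` only takes effect when sysrq is enabled; a harness that can't rely on the distro default might add something like this (an assumption, not present in run-tests.sh):

    echo 1 > /proc/sys/kernel/sysrq   # allow all sysrq functions, including crash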
||||
# setup dm tables
|
||||
echo "0 $(blockdev --getsz $T_META_DEVICE) linear $T_META_DEVICE 0" > \
|
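The echoed line is a standard device-mapper linear table; loading one by hand would look something like this sketch (the target name is assumed):

    echo "0 $(blockdev --getsz "$T_META_DEVICE") linear $T_META_DEVICE 0" | \
            dmsetup create scoutfs-test-meta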
||||
@@ -499,109 +575,130 @@ fi
|
||||
. funcs/filter.sh
|
||||
|
||||
# give tests access to built binaries in src/, prefer over installed
|
||||
PATH="$PWD/src:$PATH"
|
||||
export PATH="$PWD/src:$PATH"
|
||||
|
||||
msg "running tests"
|
||||
> "$T_RESULTS/skip.log"
|
||||
> "$T_RESULTS/fail.log"
|
||||
|
||||
# generate a test ID to make sure we can de-duplicate TAP results in aggregation
|
||||
. funcs/tap.sh
|
||||
t_tap_header $(uuidgen)
|
||||
|
||||
testcount=0
|
||||
passed=0
|
||||
skipped=0
|
||||
failed=0
|
||||
skipped_permitted=0
|
||||
for t in $tests; do
|
||||
# tests has basenames from sequence, get path and name
|
||||
t="tests/$t"
|
||||
test_name=$(basename "$t" | sed -e 's/.sh$//')
|
||||
|
||||
# create a temporary dir and file path for the test
|
||||
T_TMPDIR="$T_RESULTS/tmp/$test_name"
|
||||
T_TMP="$T_TMPDIR/tmp"
|
||||
cmd rm -rf "$T_TMPDIR"
|
||||
cmd mkdir -p "$T_TMPDIR"
|
||||
|
||||
# create a test name dir in the fs
|
||||
T_DS=""
|
||||
for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="${T_M[$i]}/test/$test_name"
|
||||
|
||||
test $i == 0 && cmd mkdir -p "$dir"
|
||||
|
||||
eval T_D$i=$dir
|
||||
T_D[$i]=$dir
|
||||
T_DS+="$dir "
|
||||
done
|
||||
|
||||
# export all our T_ variables
|
||||
for v in ${!T_*}; do
|
||||
eval export $v
|
||||
done
|
||||
export PATH # give test access to scoutfs binary
|
||||
|
||||
# prepare to compare output to golden output
|
||||
test -e "$T_RESULTS/output" || cmd mkdir -p "$T_RESULTS/output"
|
||||
out="$T_RESULTS/output/$test_name"
|
||||
> "$T_TMPDIR/status.msg"
|
||||
golden="golden/$test_name"
|
||||
|
||||
# get stats from previous pass
|
||||
last="$T_RESULTS/last-passed-test-stats"
|
||||
stats=$(grep -s "^$test_name " "$last" | cut -d " " -f 2-)
|
||||
test -n "$stats" && stats="last: $stats"
|
||||
|
||||
printf " %-30s $stats" "$test_name"
|
||||
|
||||
# record dmesg before
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.before"
|
||||
# mark in dmesg as to what test we are running
|
||||
echo "run scoutfs test $test_name" > /dev/kmsg
|
||||
|
||||
# give tests stdout and compared output on specific fds
|
||||
exec 6>&1
|
||||
exec 7>$out
|
||||
# let the test get at its extra files
|
||||
T_EXTRA="$T_TESTS/extra/$test_name"
|
||||
|
||||
# run the test with access to our functions
|
||||
start_secs=$SECONDS
|
||||
bash -c "for f in funcs/*.sh; do . \$f; done; . $t" >&7 2>&1
|
||||
sts="$?"
|
||||
log "test $t exited with status $sts"
|
||||
stats="$((SECONDS - start_secs))s"
|
||||
for iter in $(seq 1 $T_LOOP_ITER); do
|
||||
|
||||
# close our weird descriptors
|
||||
exec 6>&-
|
||||
exec 7>&-
|
||||
# create a temporary dir and file path for the test
|
||||
T_TMPDIR="$T_RESULTS/tmp/$test_name"
|
||||
T_TMP="$T_TMPDIR/tmp"
|
||||
cmd rm -rf "$T_TMPDIR"
|
||||
cmd mkdir -p "$T_TMPDIR"
|
||||
|
||||
# compare output if the test returned passed status
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
if [ ! -e "$golden" ]; then
|
||||
message="no golden output"
|
||||
sts=$T_FAIL_STATUS
|
||||
elif ! cmp -s "$golden" "$out"; then
|
||||
message="output differs"
|
||||
sts=$T_FAIL_STATUS
|
||||
diff -u "$golden" "$out" >> "$T_RESULTS/fail.log"
|
||||
# create a test name dir in the fs, clean up old data as needed
|
||||
T_DS=""
|
||||
for i in $(seq 0 $((T_NR_MOUNTS - 1))); do
|
||||
dir="${T_M[$i]}/test/$test_name"
|
||||
|
||||
test $i == 0 && (
|
||||
test -d "$dir" && cmd rm -rf "$dir"
|
||||
cmd mkdir -p "$dir"
|
||||
)
|
||||
|
||||
eval T_D$i=$dir
|
||||
T_D[$i]=$dir
|
||||
T_DS+="$dir "
|
||||
done
|
||||
|
||||
# export all our T_ variables
|
||||
for v in ${!T_*}; do
|
||||
eval export $v
|
||||
done
|
||||
|
||||
# prepare to compare output to golden output
|
||||
test -e "$T_RESULTS/output" || cmd mkdir -p "$T_RESULTS/output"
|
||||
out="$T_RESULTS/output/$test_name"
|
||||
> "$T_TMPDIR/status.msg"
|
||||
golden="golden/$test_name"
|
||||
|
||||
# record dmesg before
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.before"
|
||||
|
||||
# give tests stdout and compared output on specific fds
|
||||
exec 6>&1
|
||||
exec 7>$out
|
||||
|
||||
# run the test with access to our functions
|
||||
start_secs=$SECONDS
|
||||
bash -c "for f in funcs/*.sh; do . \$f; done; . $t" >&7 2>&1
|
||||
sts="$?"
|
||||
log "test $t exited with status $sts"
|
||||
stats="$((SECONDS - start_secs))s"
|
||||
|
||||
# close our weird descriptors
|
||||
exec 6>&-
|
||||
exec 7>&-
|
||||
|
||||
# compare output if the test returned passed status
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
if [ ! -e "$golden" ]; then
|
||||
message="no golden output"
|
||||
sts=$T_FAIL_STATUS
|
||||
elif ! cmp -s "$golden" "$out"; then
|
||||
message="output differs"
|
||||
sts=$T_FAIL_STATUS
|
||||
diff -u "$golden" "$out" >> "$T_RESULTS/fail.log"
|
||||
fi
|
||||
else
|
||||
# get message from t_*() functions
|
||||
message=$(cat "$T_TMPDIR/status.msg")
|
||||
fi
|
||||
else
|
||||
# get message from t_*() functions
|
||||
message=$(cat "$T_TMPDIR/status.msg")
|
||||
fi
|
||||
|
||||
# see if anything unexpected was added to dmesg
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.after"
|
||||
diff --old-line-format="" --unchanged-line-format="" \
|
||||
"$T_TMPDIR/dmesg.before" "$T_TMPDIR/dmesg.after" > \
|
||||
"$T_TMPDIR/dmesg.new"
|
||||
# see if anything unexpected was added to dmesg
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
dmesg | t_filter_dmesg > "$T_TMPDIR/dmesg.after"
|
||||
diff --old-line-format="" --unchanged-line-format="" \
|
||||
"$T_TMPDIR/dmesg.before" "$T_TMPDIR/dmesg.after" > \
|
||||
"$T_TMPDIR/dmesg.new"
|
||||
|
||||
if [ -s "$T_TMPDIR/dmesg.new" ]; then
|
||||
message="unexpected messages in dmesg"
|
||||
sts=$T_FAIL_STATUS
|
||||
cat "$T_TMPDIR/dmesg.new" >> "$T_RESULTS/fail.log"
|
||||
if [ -s "$T_TMPDIR/dmesg.new" ]; then
|
||||
message="unexpected messages in dmesg"
|
||||
sts=$T_FAIL_STATUS
|
||||
cat "$T_TMPDIR/dmesg.new" >> "$T_RESULTS/fail.log"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# record unknown exit status
|
||||
if [ "$sts" -lt "$T_FIRST_STATUS" -o "$sts" -gt "$T_LAST_STATUS" ]; then
|
||||
message="unknown status: $sts"
|
||||
sts=$T_FAIL_STATUS
|
||||
fi
|
||||
# record unknown exit status
|
||||
if [ "$sts" -lt "$T_FIRST_STATUS" -o "$sts" -gt "$T_LAST_STATUS" ]; then
|
||||
message="unknown status: $sts"
|
||||
sts=$T_FAIL_STATUS
|
||||
fi
|
||||
|
||||
# stop looping if we didn't pass
|
||||
if [ "$sts" != "$T_PASS_STATUS" ]; then
|
||||
break;
|
||||
fi
|
||||
done
|
||||
|
||||
# show and record the result of the test
|
||||
if [ "$sts" == "$T_PASS_STATUS" ]; then
|
||||
@@ -611,6 +708,10 @@ for t in $tests; do
|
||||
grep -s -v "^$test_name " "$last" > "$last.tmp"
|
||||
echo "$test_name $stats" >> "$last.tmp"
|
||||
mv -f "$last.tmp" "$last"
|
||||
elif [ "$sts" == "$T_SKIP_PERMITTED_STATUS" ]; then
|
||||
echo " [ skipped (permitted): $message ]"
|
||||
echo "$test_name skipped (permitted) $message " >> "$T_RESULTS/skip.log"
|
||||
((skipped_permitted++))
|
||||
elif [ "$sts" == "$T_SKIP_STATUS" ]; then
|
||||
echo " [ skipped: $message ]"
|
||||
echo "$test_name $message" >> "$T_RESULTS/skip.log"
|
||||
@@ -622,9 +723,14 @@ for t in $tests; do
|
||||
|
||||
test -n "$T_ABORT" && die "aborting after first failure"
|
||||
fi
|
||||
|
||||
# record results for TAP format output
|
||||
t_tap_progress $test_name $sts
|
||||
((testcount++))
|
||||
|
||||
done
|
||||
|
||||
msg "all tests run: $passed passed, $skipped skipped, $failed failed"
|
||||
msg "all tests run: $passed passed, $skipped skipped, $skipped_permitted skipped (permitted), $failed failed"
|
||||
|
||||
|
||||
if [ -n "$T_TRACE_GLOB" -o -n "$T_TRACE_PRINTK" ]; then
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
export-get-name-parent.sh
|
||||
basic-block-counts.sh
|
||||
basic-bad-mounts.sh
|
||||
basic-posix-acl.sh
|
||||
inode-items-updated.sh
|
||||
simple-inode-index.sh
|
||||
simple-staging.sh
|
||||
simple-release-extents.sh
|
||||
simple-readdir.sh
|
||||
get-referring-entries.sh
|
||||
fallocate.sh
|
||||
basic-truncate.sh
|
||||
@@ -12,14 +14,20 @@ data-prealloc.sh
|
||||
setattr_more.sh
|
||||
offline-extent-waiting.sh
|
||||
move-blocks.sh
|
||||
projects.sh
|
||||
large-fragmented-free.sh
|
||||
format-version-forward-back.sh
|
||||
enospc.sh
|
||||
mmap.sh
|
||||
srch-safe-merge-pos.sh
|
||||
srch-basic-functionality.sh
|
||||
simple-xattr-unit.sh
|
||||
retention-basic.sh
|
||||
totl-xattr-tag.sh
|
||||
quota.sh
|
||||
lock-refleak.sh
|
||||
lock-shrink-consistency.sh
|
||||
lock-shrink-read-race.sh
|
||||
lock-pr-cw-conflict.sh
|
||||
lock-revoke-getcwd.sh
|
||||
lock-recover-invalidate.sh
|
||||
|
||||
181
tests/src/mmap_stress.c
Normal file
@@ -0,0 +1,181 @@
|
||||
#define _GNU_SOURCE
|
||||
/*
|
||||
* mmap() stress test for scoutfs
|
||||
*
|
||||
* This test exercises the scoutfs kernel module's locking by
|
||||
* repeatedly reading/writing using mmap and pread/write calls
|
||||
* across 5 clients (mounts).
|
||||
*
|
||||
* Each thread operates on a single client mount, and performs
|
||||
* operations in a random order on the file.
|
||||
*
|
||||
* The goal is to ensure that locking between the _page_mkwrite vfs
|
||||
* calls and the normal read/write paths do not cause deadlocks.
|
||||
*
|
||||
* There is no content validation performed.  All that is done is to
|
||||
* ensure that the program continues without errors.
|
||||
*/
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include <sys/mman.h>
|
||||
#include <pthread.h>
|
||||
#include <errno.h>
|
||||
|
||||
static int size = 0;
|
||||
static int count = 0; /* XXX make this duration instead */
|
||||
|
||||
struct thread_info {
|
||||
int nr;
|
||||
int fd;
|
||||
};
|
||||
|
||||
static void *run_test_func(void *ptr)
|
||||
{
|
||||
void *buf = NULL;
|
||||
char *addr = NULL;
|
||||
struct thread_info *tinfo = ptr;
|
||||
int c = 0;
|
||||
int fd;
|
||||
ssize_t read, written, ret;
|
||||
int preads = 0, pwrites = 0, mreads = 0, mwrites = 0;
|
||||
|
||||
fd = tinfo->fd;
|
||||
|
||||
if (posix_memalign(&buf, 4096, size) != 0) {
|
||||
perror("calloc");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
addr = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
|
||||
if (addr == MAP_FAILED) {
|
||||
perror("mmap");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
usleep(100000); /* 0.1sec to allow all threads to start roughly at the same time */
|
||||
|
||||
for (;;) {
|
||||
if (++c > count)
|
||||
break;
|
||||
|
||||
switch (rand() % 4) {
|
||||
case 0: /* pread */
|
||||
preads++;
|
||||
for (read = 0; read < size;) {
|
||||
ret = pread(fd, buf, size - read, read);
|
||||
if (ret < 0) {
|
||||
perror("pwrite");
|
||||
exit(-1);
|
||||
}
|
||||
read += ret;
|
||||
}
|
||||
break;
|
||||
case 1: /* pwrite */
|
||||
pwrites++;
|
||||
memset(buf, (char)(c & 0xff), size);
|
||||
for (written = 0; written < size;) {
|
||||
ret = pwrite(fd, buf, size - written, written);
|
||||
if (ret < 0) {
|
||||
perror("pwrite");
|
||||
exit(-1);
|
||||
}
|
||||
written += ret;
|
||||
}
|
||||
break;
|
||||
case 2: /* mmap read */
|
||||
mreads++;
|
||||
memcpy(buf, addr, size); /* noerr */
|
||||
break;
|
||||
case 3: /* mmap write */
|
||||
mwrites++;
|
||||
memset(buf, (char)(c & 0xff), size);
|
||||
memcpy(addr, buf, size); /* noerr */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
munmap(addr, size);
|
||||
|
||||
free(buf);
|
||||
|
||||
printf("thread %u complete: preads %u pwrites %u mreads %u mwrites %u\n", tinfo->nr,
|
||||
preads, pwrites, mreads, mwrites);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
pthread_t thread[5];
|
||||
struct thread_info tinfo[5];
|
||||
int fd[5];
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (argc != 8) {
|
||||
fprintf(stderr, "%s requires 7 arguments - size count file1 file2 file3 file4 file5\n", argv[0]);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
size = atoi(argv[1]);
|
||||
if (size <= 0) {
|
||||
fprintf(stderr, "invalid size, must be greater than 0\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
count = atoi(argv[2]);
|
||||
if (count <= 0) {
|
||||
fprintf(stderr, "invalid count, must be greater than 0\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* create and truncate one fd */
|
||||
fd[0] = open(argv[3], O_RDWR | O_CREAT | O_TRUNC, 00644);
|
||||
if (fd[0] < 0) {
|
||||
perror("open");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* make it the test size */
|
||||
if (posix_fallocate(fd[0], 0, size) != 0) {
|
||||
perror("fallocate");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* now open the rest of the fds */
|
||||
for (i = 1; i < 5; i++) {
|
||||
fd[i] = open(argv[3+i], O_RDWR);
|
||||
if (fd[i] < 0) {
|
||||
perror("open");
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
/* start threads */
|
||||
for (i = 0; i < 5; i++) {
|
||||
tinfo[i].fd = fd[i];
|
||||
tinfo[i].nr = i;
|
||||
ret = pthread_create(&thread[i], NULL, run_test_func, (void*)&tinfo[i]);
|
||||
|
||||
if (ret) {
|
||||
perror("pthread_create");
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
/* wait for complete */
|
||||
for (i = 0; i < 5; i++)
|
||||
pthread_join(thread[i], NULL);
|
||||
|
||||
for (i = 0; i < 5; i++)
|
||||
close(fd[i]);
|
||||
|
||||
exit(0);
|
||||
}
|
||||
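A plausible invocation from a test script (the binary name, mount paths, and 1 MiB size are assumptions for illustration; the five paths must name the same file as seen through five different client mounts):

    mmap_stress $((1024 * 1024)) 10000 \
            "$T_D0/stress" "$T_D1/stress" "$T_D2/stress" \
            "$T_D3/stress" "$T_D4/stress"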
159
tests/src/mmap_validate.c
Normal file
@@ -0,0 +1,159 @@
|
||||
#define _GNU_SOURCE
|
||||
/*
|
||||
* mmap() content consistency checking for scoutfs
|
||||
*
|
||||
* This test program validates that content from memory mappings
|
||||
* is consistent across clients, whether written/read with mmap or
|
||||
* normal writes/reads.
|
||||
*
|
||||
* One side of the read/write pair will always be memory mapped.  It may
|
||||
* be that both sides use memory mapping (33% of the time).
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
|
||||
static int count = 0;
|
||||
static int size = 0;
|
||||
|
||||
static void run_test_func(int fd1, int fd2)
|
||||
{
|
||||
void *buf1 = NULL;
|
||||
void *buf2 = NULL;
|
||||
char *addr1 = NULL;
|
||||
char *addr2 = NULL;
|
||||
int c = 0;
|
||||
ssize_t read, written, ret;
|
||||
|
||||
/* buffers for both sides to compare */
|
||||
if (posix_memalign(&buf1, 4096, size) != 0) {
|
||||
perror("calloc1");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
if (posix_memalign(&buf2, 4096, size) != 0) {
|
||||
perror("calloc1");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* memory maps for both sides */
|
||||
addr1 = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd1, 0);
|
||||
if (addr1 == MAP_FAILED) {
|
||||
perror("mmap1");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
addr2 = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd2, 0);
|
||||
if (addr2 == MAP_FAILED) {
|
||||
perror("mmap2");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
if (++c > count) /* run for count iterations */
|
||||
break;
|
||||
|
||||
/* put a pattern in buf1 */
|
||||
memset(buf1, c & 0xff, size);
|
||||
|
||||
/* pwrite or mmap write from buf1 */
|
||||
switch (c % 3) {
|
||||
case 0: /* pwrite */
|
||||
for (written = 0; written < size;) {
|
||||
ret = pwrite(fd1, buf1, size - written, written);
|
||||
if (ret < 0) {
|
||||
perror("pwrite");
|
||||
exit(-1);
|
||||
}
|
||||
written += ret;
|
||||
}
|
||||
break;
|
||||
default: /* mmap write */
|
||||
memcpy(addr1, buf1, size);
|
||||
break;
|
||||
}
|
||||
|
||||
/* pread or mmap read to buf2 */
|
||||
switch (c % 3) {
|
||||
case 2: /* pread */
|
||||
for (read = 0; read < size;) {
|
||||
ret = pread(fd2, buf2, size - read, read);
|
||||
if (ret < 0) {
|
||||
perror("pwrite");
|
||||
exit(-1);
|
||||
}
|
||||
read += ret;
|
||||
}
|
||||
break;
|
||||
default: /* mmap read */
|
||||
memcpy(buf2, addr2, size);
|
||||
break;
|
||||
}
|
||||
|
||||
/* compare bufs */
|
||||
if (memcmp(buf1, buf2, size) != 0) {
|
||||
fprintf(stderr, "memcmp() failed\n");
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
munmap(addr1, size);
|
||||
munmap(addr2, size);
|
||||
|
||||
free(buf1);
|
||||
free(buf2);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int fd[2];
|
||||
|
||||
if (argc != 5) {
|
||||
fprintf(stderr, "%s requires 4 arguments - size count file1 file2\n", argv[0]);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
size = atoi(argv[1]);
|
||||
if (size <= 0) {
|
||||
fprintf(stderr, "invalid size, must be greater than 0\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
count = atoi(argv[2]);
|
||||
if (count < 3) {
|
||||
fprintf(stderr, "invalid count, must be greater than 3\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* create and truncate one fd */
|
||||
fd[0] = open(argv[3], O_RDWR | O_CREAT | O_TRUNC, 00644);
|
||||
if (fd[0] < 0) {
|
||||
perror("open");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
fd[1] = open(argv[4], O_RDWR);
|
||||
if (fd[1] < 0) {
|
||||
perror("open");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* make it the test size */
|
||||
if (posix_fallocate(fd[0], 0, size) != 0) {
|
||||
perror("fallocate");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* run the test function */
|
||||
run_test_func(fd[0], fd[1]);
|
||||
|
||||
close(fd[0]);
|
||||
close(fd[1]);
|
||||
|
||||
exit(0);
|
||||
}
|
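A plausible invocation (binary name and paths assumed; the two paths must name the same file as seen from two different client mounts, and count must be at least 3):

    mmap_validate 65536 10000 "$T_D0/shared" "$T_D1/shared"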
||||
71
tests/src/o_tmpfile_linkat.c
Normal file
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (C) 2023 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <sys/stat.h>
|
||||
#include <assert.h>
|
||||
#include <limits.h>
|
||||
|
||||
static void linkat_tmpfile(char *dir, char *lpath)
|
||||
{
|
||||
char proc_self[PATH_MAX];
|
||||
int ret;
|
||||
int fd;
|
||||
|
||||
fd = open(dir, O_RDWR | O_TMPFILE, 0777);
|
||||
if (fd < 0) {
|
||||
perror("open(O_TMPFILE)");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
snprintf(proc_self, sizeof(proc_self), "/proc/self/fd/%d", fd);
|
||||
|
||||
ret = linkat(AT_FDCWD, proc_self, AT_FDCWD, lpath, AT_SYMLINK_FOLLOW);
|
||||
if (ret < 0) {
|
||||
perror("linkat");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
close(fd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Use O_TMPFILE and linkat to create a new visible file, used to test
|
||||
* the O_TMPFILE creation path by inspecting the created file.
|
||||
*/
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
char *lpath;
|
||||
char *dir;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("%s <open_dir> <linkat_path>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
dir = argv[1];
|
||||
lpath = argv[2];
|
||||
|
||||
linkat_tmpfile(dir, lpath);
|
||||
|
||||
return 0;
|
||||
}
|
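A sketch of how a test might drive it (binary name and paths assumed): create the file through O_TMPFILE plus linkat, then inspect the visible result.

    o_tmpfile_linkat "$T_D0" "$T_D0/linked"
    stat -c "%F %A %s" "$T_D0/linked"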
||||
@@ -15,7 +15,7 @@ echo "== prepare devices, mount point, and logs"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
> $T_TMP.mount.out
|
||||
scoutfs mkfs -f -Q 0,127.0.0.1,53000 "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 \
|
||||
scoutfs mkfs -f -Q 0,127.0.0.1,$T_SCRATCH_PORT "$T_EX_META_DEV" "$T_EX_DATA_DEV" > $T_TMP.mkfs.out 2>&1 \
|
||||
|| t_fail "mkfs failed"
|
||||
|
||||
echo "== bad devices, bad options"
|
||||
|
||||
110
tests/tests/basic-posix-acl.sh
Normal file
@@ -0,0 +1,110 @@
|
||||
|
||||
#
|
||||
# test basic POSIX acl functionality.
|
||||
#
|
||||
|
||||
t_require_commands stat rm touch mkdir getfacl setfacl id sudo
|
||||
t_require_mounts 2
|
||||
|
||||
# from quota.sh
|
||||
TEST_UID=22222
|
||||
TEST_GID=44444
|
||||
|
||||
# sys_setreuid() sets fs[ug]id to e[ug]id
|
||||
SET_UID="--ruid=$TEST_UID --euid=$TEST_UID"
|
||||
SET_GID="--rgid=$TEST_GID --egid=$TEST_GID --clear-groups"
|
||||
|
||||
# helper to avoid capturing dates from ls output
|
||||
L() {
|
||||
stat -c "%F %A %u %g %s %N" "$@"
|
||||
}
|
||||
|
||||
echo "== setup test directory"
|
||||
cd "$T_D0"
|
||||
|
||||
echo "== getfacl"
|
||||
L .
|
||||
getfacl .
|
||||
|
||||
echo "== basic non-acl access through permissions"
|
||||
rm -rf dir-testuid
|
||||
mkdir dir-testuid
|
||||
ln -sf dir-testuid symlinkdir-testuid
|
||||
chown root:44444 dir-testuid
|
||||
L dir-testuid
|
||||
setpriv $SET_UID $SET_GID touch dir-testuid/file-group-write
|
||||
setpriv $SET_UID $SET_GID touch symlinkdir-testuid/symlink-file-group-write
|
||||
chmod g+w dir-testuid
|
||||
setpriv $SET_UID $SET_GID touch dir-testuid/file-group-write
|
||||
setpriv $SET_UID $SET_GID touch symlinkdir-testuid/symlink-file-group-write
|
||||
L dir-testuid/file-group-write
|
||||
L symlinkdir-testuid/symlink-file-group-write
|
||||
|
||||
echo "== basic acl access"
|
||||
rm -rf dir-root
|
||||
mkdir dir-root
|
||||
ln -sf dir-root symlinkdir-root
|
||||
L dir-root
|
||||
setpriv $SET_UID touch dir-root/file-group-write
|
||||
setpriv $SET_UID touch symlinkdir-root/file-group-write
|
||||
setfacl -m u:22222:rwx dir-root
|
||||
getfacl dir-root
|
||||
setpriv $SET_UID touch dir-root/file-group-write
|
||||
setpriv $SET_UID touch symlinkdir-root/file-group-write
|
||||
L dir-root/file-group-write
|
||||
L symlinkdir-root/file-group-write
|
||||
|
||||
echo "== directory exec"
|
||||
setpriv $SET_UID bash -c "cd dir-root 2>&- && echo Success"
|
||||
setpriv $SET_UID bash -c "cd symlinkdir-root 2>&- && echo Success"
|
||||
setfacl -m u:22222:rw dir-root
|
||||
getfacl dir-root
|
||||
setpriv $SET_UID bash -c "cd dir-root 2>&- || echo Failed"
|
||||
setpriv $SET_UID bash -c "cd symlinkdir-root 2>&- || echo Failed"
|
||||
setfacl -m g:44444:rwx dir-root
|
||||
getfacl dir-root
|
||||
setpriv $SET_GID bash -c "cd dir-root 2>&- && echo Success"
|
||||
setpriv $SET_GID bash -c "cd symlinkdir-root 2>&- && echo Success"
|
||||
|
||||
echo "== get/set attr"
|
||||
rm -rf file-root
|
||||
touch file-root
|
||||
L file-root
|
||||
setpriv $SET_UID getfattr -d file-root
|
||||
setpriv $SET_UID setfattr -n "user.test1" -v "Success" file-root
|
||||
setpriv $SET_UID getfattr -d file-root
|
||||
setfacl -m u:22222:rw file-root
|
||||
getfacl file-root
|
||||
setpriv $SET_UID setfattr -n "user.test2" -v "Success" file-root
|
||||
setpriv $SET_UID getfattr -d file-root
|
||||
setfacl -x u:22222 file-root
|
||||
getfacl file-root
|
||||
setpriv $SET_UID setfattr -n "user.test3" -v "Success" file-root
|
||||
setpriv $SET_UID getfattr -d file-root
|
||||
setfacl -m g:44444:rw file-root
|
||||
getfacl file-root
|
||||
setpriv $SET_GID setfattr -n "user.test4" -v "Success" file-root
|
||||
setpriv $SET_GID getfattr -d file-root
|
||||
|
||||
echo "== inheritance / default acl"
|
||||
rm -rf dir-root2
|
||||
mkdir dir-root2
|
||||
L dir-root2
|
||||
setpriv $SET_UID mkdir dir-root2/dir
|
||||
setpriv $SET_UID touch dir-root2/dir/file
|
||||
setfacl -m d:u:22222:rwx dir-root2
|
||||
getfacl dir-root2
|
||||
setpriv $SET_UID mkdir dir-root2/dir
|
||||
setpriv $SET_UID touch dir-root2/dir/file
|
||||
setfacl -m u:22222:rwx dir-root2
|
||||
getfacl dir-root2
|
||||
setpriv $SET_UID mkdir dir-root2/dir
|
||||
setpriv $SET_UID touch dir-root2/dir/file
|
||||
L dir-root2/dir
|
||||
getfacl dir-root2/dir
|
||||
L dir-root2/dir/file
|
||||
getfacl dir-root2/dir/file
|
||||
|
||||
echo "== cleanup"
|
||||
|
||||
t_pass
|
||||
@@ -3,13 +3,13 @@
|
||||
# operations in one mount and verify the results in another.
|
||||
#
|
||||
|
||||
t_require_commands getfattr setfattr dd filefrag diff touch stat scoutfs
|
||||
t_require_commands getfattr setfattr dd diff touch stat scoutfs
|
||||
t_require_mounts 2
|
||||
|
||||
GETFATTR="getfattr --absolute-names"
|
||||
SETFATTR="setfattr"
|
||||
DD="dd status=none"
|
||||
FILEFRAG="filefrag -v -b4096"
|
||||
FIEMAP="scoutfs get-fiemap"
|
||||
|
||||
echo "== root inode updates flow back and forth"
|
||||
sleep 1
|
||||
@@ -55,8 +55,8 @@ for i in $(seq 1 10); do
|
||||
conv=notrunc oflag=append &
|
||||
wait
|
||||
done
|
||||
$FILEFRAG "$T_D0/file" | t_filter_fs > "$T_TMP.0"
|
||||
$FILEFRAG "$T_D1/file" | t_filter_fs > "$T_TMP.1"
|
||||
$FIEMAP "$T_D0/file" > "$T_TMP.0"
|
||||
$FIEMAP "$T_D1/file" > "$T_TMP.1"
|
||||
diff -u "$T_TMP.0" "$T_TMP.1"
|
||||
|
||||
echo "== unlinked file isn't found"
|
||||
@@ -210,4 +210,7 @@ done
|
||||
wait
|
||||
ls "$T_D0/concurrent"
|
||||
|
||||
echo "== cleanup"
|
||||
rm -f "$T_TMP.0" "$T_TMP.1"
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -11,7 +11,7 @@ FILE="$T_D0/file"
|
||||
# final block as we truncated past it.
|
||||
#
|
||||
echo "== truncate writes zeroed partial end of file block"
|
||||
yes | dd of="$FILE" bs=8K count=1 status=none iflag=fullblock
|
||||
yes 2>/dev/null | dd of="$FILE" bs=8K count=1 status=none iflag=fullblock
|
||||
sync
|
||||
|
||||
# not passing iflag=fullblock causes the file occasionally to just be
|
||||
|
||||
@@ -11,7 +11,7 @@ truncate -s $sz "$T_TMP.equal"
|
||||
truncate -s $large_sz "$T_TMP.large"
|
||||
|
||||
echo "== make scratch fs"
|
||||
t_quiet scoutfs mkfs -f -Q 0,127.0.0.1,53000 "$T_EX_META_DEV" "$T_EX_DATA_DEV"
|
||||
t_quiet scoutfs mkfs -f -Q 0,127.0.0.1,$T_SCRATCH_PORT "$T_EX_META_DEV" "$T_EX_DATA_DEV"
|
||||
SCR="$T_TMPDIR/mnt.scratch"
|
||||
mkdir -p "$SCR"
|
||||
|
||||
@@ -73,4 +73,7 @@ test "$large_tot" -gt "$equal_tot" ; echo "resized larger test rc: $?"
|
||||
umount "$SCR"
|
||||
losetup -d "$scr_loop"
|
||||
|
||||
echo "== cleanup"
|
||||
rm -f "$T_TMP.small" "$T_TMP.equal" "$T_TMP.large"
|
||||
|
||||
t_pass
|
||||
|
||||
@@ -28,7 +28,7 @@ while [ "$SECONDS" -lt "$END" ]; do
|
||||
for i in $(t_fs_nrs); do
|
||||
if [ "$i" -ge "$quorum_nr" ]; then
|
||||
t_umount $i &
|
||||
echo "umount $i pid $pid quo $quorum_nr" \
|
||||
echo "umount $i rid $rid quo $quorum_nr" \
|
||||
>> $T_TMP.log
|
||||
mounted[$i]=0
|
||||
fi
|
||||
@@ -53,6 +53,9 @@ while [ "$SECONDS" -lt "$END" ]; do
|
||||
|
||||
for i in "${lock_arr[@]}"; do
|
||||
if [[ ! " ${rid_arr[*]} " =~ " $i " ]]; then
|
||||
echo -e "RID($i) exists" >> $T_TMP.log
|
||||
echo -e "rid_arr:\n${rid_arr[@]}" >> $T_TMP.log
|
||||
echo -e "lock_arr:\n${lock_arr[@]}" >> $T_TMP.log
|
||||
t_fail "RID($i): exists when not mounted"
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Test clustered parallel createmany
|
||||
#
|
||||
|
||||
t_require_commands mkdir createmany
|
||||
t_require_commands mkdir createmany bc
|
||||
t_require_mounts 2
|
||||
|
||||
COUNT=50000
|
||||
@@ -17,14 +17,14 @@ mkdir -p $T_D0/dir/0
|
||||
mkdir $T_D1/dir/1
|
||||
|
||||
echo "== measure initial createmany"
|
||||
START=$SECONDS
|
||||
START=$(date +%s.%N)
|
||||
createmany -o "$T_D0/file_" $COUNT >> $T_TMP.full
|
||||
sync
|
||||
SINGLE=$((SECONDS - START))
|
||||
echo single $SINGLE >> $T_TMP.full
|
||||
END=$(date +%s.%N)
|
||||
SINGLE=$(echo "$END - $START" | bc)
|
||||
|
||||
echo "== measure two concurrent createmany runs"
|
||||
START=$SECONDS
|
||||
START=$(date +%s.%N)
|
||||
(cd $T_D0/dir/0; createmany -o ./file_ $COUNT > /dev/null) &
|
||||
pids="$!"
|
||||
(cd $T_D1/dir/1; createmany -o ./file_ $COUNT > /dev/null) &
|
||||
@@ -33,7 +33,9 @@ for p in $pids; do
|
||||
wait $p
|
||||
done
|
||||
sync
|
||||
BOTH=$((SECONDS - START))
|
||||
END=$(date +%s.%N)
|
||||
BOTH=$(echo "$END - $START" | bc)
|
||||
|
||||
echo both $BOTH >> $T_TMP.full
|
||||
|
||||
# Multi node still adds significant overhead, even with our CW locks
|
||||
@@ -44,7 +46,7 @@ echo both $BOTH >> $T_TMP.full
|
||||
# exceed this factor should the CW locked items go back to fully
|
||||
# synchronized operation.
|
||||
FACTOR=200
|
||||
if [ "$BOTH" -gt $(($SINGLE*$FACTOR)) ]; then
|
||||
if [ $(echo "$BOTH > ( $SINGLE * $FACTOR )" | bc) == "1" ]; then
|
||||
t_fail "both createmany took $BOTH sec, more than $FACTOR x single $SINGLE sec"
|
||||
fi
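Since $SECONDS only has whole-second resolution, switching to date +%s.%N with bc keeps sub-second timings; bc prints 1 when the inequality holds, e.g. with illustrative numbers:

    echo "12.50 > ( 0.05 * 200 )" | bc    # prints 1, so the check would t_fail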
|
||||
|
||||
|
||||