Mirror of https://github.com/versity/scoutfs.git (synced 2026-01-09 13:23:14 +00:00)
Compare commits: 555 commits
ReleaseNotes.md (413 changed lines)
@@ -1,6 +1,419 @@
Versity ScoutFS Release Notes
=============================

---
v1.25
\
*Jun 3, 2025*

Fix a bug that could cause indefinite retries of failed client commits.
Under specific error conditions the client and server's understanding of
the current client commit could get out of sync. The client would retry
commits indefinitely that could never succeed. This manifested as
infinite "critical transaction commit failure" messages in the kernel
log on the client and matching "error <nr> committing client logs" on
the server.

Fix a bug in a specific case of server error handling that could result
in sending references to unwritten blocks to the client. The client
would try to read blocks that hadn't been written and return spurious
errors. This was seen under low free space conditions on the server and
resulted in error messages with error code 116 (the errno value for
ESTALE, the client's indication that it couldn't read the blocks that it
expected).

---
v1.24
\
*Mar 14, 2025*

Add support for coherent read and write mmap() mappings of regular file
data between mounts.

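A quick point of reference for what "coherent" means here: a MAP_SHARED
mapping written through one mount is observed by a MAP_SHARED mapping of
the same file reached through another mount. The sketch below is plain
POSIX code, not a scoutfs-specific API, and assumes the file already
spans at least one page.

```c
/* Generic sketch: write through a shared mapping; a reader mapping the
 * same file on another mount sees the update. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int write_via_map(const char *path, const char *msg)
{
	int fd = open(path, O_RDWR);
	char *map;

	if (fd < 0)
		return -1;
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		close(fd);
		return -1;
	}
	strcpy(map, msg);
	msync(map, 4096, MS_SYNC);
	munmap(map, 4096);
	close(fd);
	return 0;
}
```
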
Fix a bug that was causing scoutfs utilities to parse and change some
file names before passing them on to the kernel for processing. This
fixes spurious scoutfs command errors for files with the offending
patterns in their names.

Fix a bug where rename wasn't updating the ctime of the inode at the
destination name if it existed.

---
v1.23
\
*Dec 11, 2024*

Add support for kernels in the RHEL 9.5 minor release.

---
v1.22
\
*Nov 1, 2024*

Add support for building against the RHEL9 family of kernels.

Fix failure of the setattr\_more ioctl() to set the attributes of a
zero-length file when restoring.

Fix support for POSIX ACLs in the RHEL8 and later family of kernels.

Fix a race condition in the lock server that could drop lock requests
under heavy load and cause cluster lock attempts to hang.

---
v1.21
\
*Jul 1, 2024*

This release adds features that rely on incompatible changes to the
structure of the file system. The process of advancing the format
version to enable these features is described in scoutfs(5).

Added the ".indx." extended attribute tag which can be used to determine
the sorting of files in a global index.

Added ScoutFS quotas which let rules define file size and count limits
in terms of ".totl." extended attribute totals.

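The totals are driven by tagged extended attributes set by applications.
The sketch below only illustrates the mechanism with setxattr(2); the
attribute name and value shown are placeholders, not the real naming
convention, which is documented in scoutfs(5).

```c
/* Placeholder sketch: the xattr name and value format are hypothetical;
 * scoutfs(5) documents the actual ".totl." naming rules. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int tag_file_for_total(const char *path)
{
	const char *name = "scoutfs.example.totl.1.2.3";  /* hypothetical name */
	const char *value = "1";  /* value interpretation is defined by scoutfs */

	if (setxattr(path, name, value, strlen(value), 0) < 0) {
		perror("setxattr");
		return -1;
	}
	return 0;
}
```
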
Added the project ID file attribute which is inherited from parent
directories on creation. ScoutFS quota rules can reference project IDs.

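On Linux, per-file project IDs are conventionally read through the
generic FS_IOC_FSGETXATTR ioctl; treating that as the access path here
is an assumption, shown only to make the attribute concrete.

```c
/* Assumption: the project ID is visible through the generic
 * FS_IOC_FSGETXATTR interface; consult scoutfs(5) for the supported path. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int print_projid(const char *path)
{
	struct fsxattr fsx;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) == 0)
		printf("%s: project id %u\n", path, fsx.fsx_projid);
	close(fd);
	return 0;
}
```
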
Add a retention attribute for files which prevents modification once
enabled.

---
v1.20
\
*Apr 22, 2024*

Minor changes to packaging to better support "weak" module linking of
the kernel module, and to include git hashes in the built package. No
changes in runtime behaviour.

---
v1.19
\
*Jan 30, 2024*

Added the log\_merge\_wait\_timeout\_ms mount option to set the timeout
for creating log merge operations. The previous timeout, now the
default, was too short for some systems and was resulting in consistent
timeouts which created an excessive number of log trees waiting to be
merged.

Improved performance of many in-mount server operations when there are a
large number of log trees waiting to be merged.

---
v1.18
\
*Nov 7, 2023*

Fixed a bug where background srch file compaction could stop making
forward progress if a partial compaction operation was committed at a
specific byte offset in a block. This would cause srch file searches to
become progressively more expensive over time. Once this fix is running,
background compaction will resume, bringing the cost of searches back
down.

---
v1.17
\
*Oct 23, 2023*

Add support for EL8 generation kernels.

---
v1.16
\
*Oct 4, 2023*

Fix an issue where the server could hang on startup if its persistent
allocator structures were left in a specific degraded state by the
previously active server.

---
v1.15
\
*Jul 17, 2023*

Process log btree merge splicing in multiple commits. This prevents a
rare case where pending log merge completions contain more work than can
be done in a single server commit, causing the server to trigger an
assert shortly after starting.

Fix spurious EINVAL from data writes when data\_prealloc\_contig\_only was
set to 0.

---
v1.14
\
*Jun 29, 2023*

Add get\_referring\_entries ioctl for getting directory entries that
refer to an inode.

Fix excessive CPU use in the move\_blocks interface when moving a large
number of extents.

Reduce fragmented data allocation when contig\_only prealloc is not in
use by more consistently allocating multi-block extents within each
aligned prealloc region.

Avoid rare deadlock in metadata block cache reclaim under both heavy
load and memory pressure.

Fix crash when using quorum\_heartbeat\_timeout\_ms mount option.

---
v1.13
\
*May 19, 2023*

Add the quorum\_heartbeat\_timeout\_ms mount option to set the quorum
heartbeat timeout.

Change some task prioritization and allocation behavior of the quorum
agent to help reduce delays in sending and receiving heartbeat messages.

---
v1.12
\
*Apr 17, 2023*

Add the prepare-empty-data-device scoutfs command. A data device can be
unused when no files have data blocks, perhaps because they're archived
and offline. In this case the data device can be swapped out for
another device without changes to the metadata device.

Fix an oversight which limited inode timestamps to second granularity
for some operations. All operations now record timestamps with full
nanosecond precision.

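For reference, the nanosecond fields are the ordinary st_*tim members
reported by stat(2); nothing scoutfs-specific is involved in observing
them.

```c
/* Generic illustration: nanosecond timestamp fields reported by stat(2). */
#include <stdio.h>
#include <sys/stat.h>

int print_mtime(const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return -1;
	printf("%s: mtime %lld.%09ld\n", path,
	       (long long)st.st_mtim.tv_sec, st.st_mtim.tv_nsec);
	return 0;
}
```
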
Fix spurious ENOENT failures when renaming from other directories into
the root directory.

---
v1.11
\
*Feb 2, 2023*

Fixed a free extent processing error that could prevent mount from
proceeding when free data extents were sufficiently fragmented. Mount
now properly handles very fragmented free extent maps.

Fixed a statfs server processing race that could return spurious errors
and shut down the server. With the race closed, statfs processing is
reliable.

Fixed a rare livelock in the move\_blocks ioctl. With the right
relationship between ioctl arguments and eventual file extent items, the
core loop in the move\_blocks ioctl could get stuck looping on an extent
item and never return. The loop exit conditions were fixed and the loop
will always advance through all extents.

Changed the 'print' scoutfs commands to flush the block cache for the
devices. It was inconvenient to expect cache flushing to be a separate
step to ensure consistency with remote node writes.

---
v1.10
\
*Dec 7, 2022*

Fixed a potential directory entry cache management deadlock that could
occur when many nodes performed heavy metadata write loads across shared
directories and their child subdirectories. The deadlock could halt
invalidation progress on a node, which could then stop use of locks that
needed invalidation on that node, which would result in almost all tasks
hanging on those locks that would never make progress.

Fixed a circumstance where metadata change sequence index item
modification could leave behind old stale metadata sequence items. The
duplication case required concurrent metadata updates across mounts with
particular open transaction patterns, so the duplicate items are rare.
They resulted in a small amount of additional load when walking change
indexes but had no effect on correctness.

Fixed a rare case, found in testing, where sparse file extension might
not write partial blocks of zeros. This required using truncate to
extend files past file sizes that end in partial blocks, along with the
right transaction commit and memory reclaim patterns. This never
affected regular non-sparse files nor files prepopulated with fallocate.

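The pattern that was affected is extending a file purely with truncate
past a size that ends in a partial block; a generic illustration of that
pattern follows (nothing scoutfs-specific).

```c
/* Extend a file past a size that ends in a partial block using only
 * truncate; the newly exposed tail must read back as zeros. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int extend_and_check(const char *path)
{
	char buf[4096];
	char zeros[4096] = {0};
	int fd = open(path, O_RDWR | O_CREAT, 0644);
	int ok = -1;

	if (fd < 0)
		return -1;
	if (ftruncate(fd, 1000) == 0 &&		/* size ends in a partial block */
	    ftruncate(fd, 100000) == 0 &&	/* extend with truncate alone */
	    pread(fd, buf, sizeof(buf), 0) == (ssize_t)sizeof(buf))
		ok = memcmp(buf + 1000, zeros, sizeof(buf) - 1000) ? -1 : 0;
	close(fd);
	return ok;
}
```
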
---
v1.9
\
*Oct 29, 2022*

Fix VFS cached directory entry consistency verification that could cause
spurious "no such file or directory" (ENOENT) errors from rename over
NFS under certain conditions. The problem was only ever with the
consistency of in-memory cached dentry objects; persistent data was
correct, and eventual eviction of the bad cached objects would stop
generating the errors.

---
v1.8
\
*Oct 18, 2022*

Add support for Linux POSIX Access Control Lists, as described in
acl(5). Mount options are added to enable ("acl") and disable ("noacl")
support. The default is to support ACLs. ACLs are stored in the
existing extended attribute scheme so adding support does not require
a format change.

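Because the ACLs use the standard VFS POSIX ACL extended attributes, the
usual tools (getfacl/setfacl) and syscalls work unchanged. A minimal
check with getxattr(2) on the standard attribute name:

```c
/* Read the raw POSIX access ACL xattr; "system.posix_acl_access" is the
 * standard name used by the VFS for all filesystems that support ACLs. */
#include <stdio.h>
#include <sys/xattr.h>

int has_access_acl(const char *path)
{
	ssize_t len = getxattr(path, "system.posix_acl_access", NULL, 0);

	if (len < 0) {
		perror("getxattr");	/* ENODATA means no ACL is set */
		return 0;
	}
	printf("%s: access ACL is %zd bytes\n", path, len);
	return 1;
}
```
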
Add options to control data extent preallocation. The default behavior
does not change. The options can relax the limits on preallocation,
which will then trigger under more write patterns and increase the risk
of preallocated space which is never used. The options are described in
scoutfs(5).

---
v1.7
\
*Aug 26, 2022*

* **Fixed possible persistent errors moving freed data extents**
  \
  Fixed a case where the server could hit persistent errors trying to
  move a client's freed extents in one commit. The client had to free
  a large number of extents that occupied distant positions in the
  global free extent btree. Very large fragmented files could cause
  this. The server now moves the freed extents in multiple commits and
  can always ensure forward progress.

* **Fixed possible persistent errors from freed duplicate extents**
  \
  Background orphan deletion wasn't properly synchronizing with
  foreground tasks deleting very large files. If a deletion took long
  enough then background deletion could also attempt to delete inode
  items while the deletion was making progress. This could create
  duplicate deletions of data extent items, which causes the server to
  abort when it later discovers the duplicate extents as it merges free
  lists.

---
v1.6
\
*Jul 7, 2022*

* **Fix memory leaks in rare corner cases**
  \
  Analysis tools found a few corner cases that leaked small structures,
  generally around error handling or startup and shutdown.

* **Add --skip-likely-huge scoutfs print command option**
  \
  Add an option to scoutfs print to reduce the size of the output
  so that it can be used to see system-wide metadata without being
  overwhelmed by file-level details.

---
v1.5
\
*Jun 21, 2022*

* **Fix persistent error during server startup**
  \
  Fixed a case where the server would always hit a consistent error on
  startup, preventing the system from mounting. This required a rare
  but valid state across the clients.

* **Fix a client hang that would lead to fencing**
  \
  The client module's use of in-kernel networking was missing an
  annotation, which could lead to communication hanging. The server
  would fence the client when it stopped communicating. This could be
  identified by the server fencing a client after it disconnected with
  no attempt by the client to reconnect.

---
v1.4
\
*May 6, 2022*

* **Fix possible client crash during server failover**
  \
  Fixed a narrow window during server failover and lock recovery that
  could cause a client mount to believe that it had an inconsistent item
  cache and panic. This required very specific lock state and messaging
  patterns between multiple mounts and multiple servers which made it
  unlikely to occur in the field.

---
v1.3
\
*Apr 7, 2022*

* **Fix rare server instability under heavy load**
  \
  Fixed a case of server instability under heavy load due to concurrent
  work fully exhausting metadata block allocation pools reserved for a
  single server transaction. This would cause a brief interruption as
  the server shut down and the next server started up and made progress
  as pending work was retried.

* **Fix slow fencing preventing server startup**
  \
  If a server had to process many fence requests with a slow fencing
  mechanism it could be interrupted before it finished. The server
  now makes sure heartbeat messages are sent while it is making progress
  on fencing requests so that other quorum members don't interrupt the
  process.

* **Performance improvement in getxattr and setxattr**
  \
  Kernel allocation patterns in the getxattr and setxattr
  implementations were causing significant contention between CPUs.
  Their allocation strategy was changed so that concurrent tasks can
  call these xattr methods without degrading performance.

---
v1.2
\
*Mar 14, 2022*

* **Fix deadlock between fallocate() and read() system calls**
  \
  Fixed a lock inversion that could cause two tasks to deadlock if they
  performed fallocate() and read() on a file at the same time. The
  deadlock was uninterruptible so the machine needed to be rebooted.
  This was relatively rare as fallocate() is usually used to prepare
  files before they're used.

* **Fix instability from heavy file deletion workloads**
  \
  Fixed rare circumstances under which background file deletion cleanup
  tasks could try to delete a file while it is being deleted by another
  task. Heavy load across multiple nodes, either many files being
  deleted or large files being deleted, increased the chances of this
  happening. Heavy staging could cause this problem because staging can
  create many internal temporary files that need to be deleted.

---
v1.1
\
*Feb 4, 2022*

* **Add scoutfs(1) change-quorum-config command**
  \
  Add a change-quorum-config command to scoutfs(1) to change the quorum
  configuration stored in the metadata device while the file system is
  unmounted. This can be used to change the mounts that will
  participate in quorum and the IP addresses they use.

* **Fix Rare Risk of Item Cache Corruption**
  \
  Code review found a rare potential source of item cache corruption.
  If this happened, it would look as though deleted parts of the
  filesystem returned, but only at the time they were deleted. Old
  deleted items are not affected. This problem only affected the item
  cache, never persistent storage. Unmounting and remounting would drop
  the bad item cache and resync it with the correct persistent data.

---
v1.0
\

@@ -5,24 +5,22 @@ ifeq ($(SK_KSRC),)
SK_KSRC := $(shell echo /lib/modules/`uname -r`/build)
endif

# fail if sparse fails if we find it
ifeq ($(shell sparse && echo found),found)
SP =
else
SP = @:
endif

SCOUTFS_GIT_DESCRIBE := \
SCOUTFS_GIT_DESCRIBE ?= \
	$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
		echo no-git)

ESCAPED_GIT_DESCRIBE := \
	$(shell echo $(SCOUTFS_GIT_DESCRIBE) |sed -e 's/\//\\\//g')

RPM_GITHASH ?= $(shell git rev-parse --short HEAD)

SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
	RPM_GITHASH=$(RPM_GITHASH) \
	CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
	EXTRA_CFLAGS="-Werror"

# - We use the git describe from tags to set up the RPM versioning
RPM_VERSION := $(shell git describe --long --tags | awk -F '-' '{gsub(/^v/,""); print $$1}')
RPM_GITHASH := $(shell git rev-parse --short HEAD)
TARFILE = scoutfs-kmod-$(RPM_VERSION).tar

@@ -31,17 +29,16 @@ TARFILE = scoutfs-kmod-$(RPM_VERSION).tar
all: module

module:
	make $(SCOUTFS_ARGS)
	$(SP) make C=2 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)
	$(MAKE) CHECK=$(CURDIR)/src/sparse-filtered.sh C=1 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)

modules_install:
	make $(SCOUTFS_ARGS) modules_install
	$(MAKE) $(SCOUTFS_ARGS) modules_install

%.spec: %.spec.in .FORCE
	sed -e 's/@@VERSION@@/$(RPM_VERSION)/g' \
	    -e 's/@@GITHASH@@/$(RPM_GITHASH)/g' < $< > $@+
	    -e 's/@@GITHASH@@/$(RPM_GITHASH)/g' \
	    -e 's/@@GITDESCRIBE@@/$(ESCAPED_GIT_DESCRIBE)/g' < $< > $@+
	mv $@+ $@

@@ -50,4 +47,4 @@ dist: scoutfs-kmod.spec
	@ tar rf $(TARFILE) --transform="s@\(.*\)@scoutfs-kmod-$(RPM_VERSION)/\1@" scoutfs-kmod.spec

clean:
	make $(SCOUTFS_ARGS) clean
	$(MAKE) $(SCOUTFS_ARGS) clean

@@ -1,18 +1,26 @@
|
||||
%define kmod_name scoutfs
|
||||
%define kmod_version @@VERSION@@
|
||||
%define kmod_git_hash @@GITHASH@@
|
||||
%define kmod_git_describe @@GITDESCRIBE@@
|
||||
%define pkg_date %(date +%%Y%%m%%d)
|
||||
|
||||
# take kernel version or default to uname -r
|
||||
%{!?kversion: %global kversion %(uname -r)}
|
||||
%global kernel_version %{kversion}
|
||||
|
||||
%if 0%{?el7}
|
||||
%global kernel_source() /usr/src/kernels/%{kernel_version}.$(arch)
|
||||
%global kernel_release() %{kversion}
|
||||
%else
|
||||
%global kernel_source() /usr/src/kernels/%{kernel_version}
|
||||
%endif
|
||||
|
||||
%{!?_release: %global _release 0.%{pkg_date}git%{kmod_git_hash}}
|
||||
|
||||
%if 0%{?el7}
|
||||
Name: %{kmod_name}
|
||||
%else
|
||||
Name: kmod-%{kmod_name}
|
||||
%endif
|
||||
Summary: %{kmod_name} kernel module
|
||||
Version: %{kmod_version}
|
||||
Release: %{_release}%{?dist}
|
||||
@@ -20,24 +28,42 @@ License: GPLv2
|
||||
Group: System/Kernel
|
||||
URL: http://scoutfs.org/
|
||||
|
||||
%if 0%{?el7}
|
||||
BuildRequires: %{kernel_module_package_buildreqs}
|
||||
BuildRequires: git
|
||||
%else
|
||||
BuildRequires: elfutils-libelf-devel
|
||||
%endif
|
||||
BuildRequires: kernel-devel-uname-r = %{kernel_version}
|
||||
BuildRequires: git
|
||||
BuildRequires: module-init-tools
|
||||
|
||||
ExclusiveArch: x86_64
|
||||
|
||||
Source: %{kmod_name}-kmod-%{kmod_version}.tar
|
||||
|
||||
%if 0%{?el7}
|
||||
# Build only for standard kernel variant(s); for debug packages, append "debug"
|
||||
# after "default" (separated by space)
|
||||
%kernel_module_package default
|
||||
%endif
|
||||
|
||||
# Disable the building of the debug package(s).
|
||||
%define debug_package %{nil}
|
||||
%global install_mod_dir extra/%{kmod_name}
|
||||
|
||||
%global install_mod_dir extra/%{name}
|
||||
%if ! 0%{?el7}
|
||||
%global flavors_to_build x86_64
|
||||
%endif
|
||||
|
||||
# el9 sanity: make sure we lock to the minor release we built for and block upgrades
|
||||
%{lua:
|
||||
if string.match(rpm.expand("%{dist}"), "%.el9") then
|
||||
rpm.define("el9 1")
|
||||
end
|
||||
}
|
||||
|
||||
%if 0%{?el9}
|
||||
%define release_major_minor 9.%{lua: print(rpm.expand("%{dist}"):match("%.el9_(%d)"))}
|
||||
Requires: system-release = %{release_major_minor}
|
||||
%endif
|
||||
|
||||
%description
|
||||
%{kmod_name} - kernel module
|
||||
@@ -57,7 +83,7 @@ echo "Building for kernel: %{kernel_version} flavors: '%{flavors_to_build}'"
|
||||
for flavor in %flavors_to_build; do
|
||||
rm -rf obj/$flavor
|
||||
cp -r source obj/$flavor
|
||||
make SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
|
||||
make RPM_GITHASH=%{kmod_git_hash} SCOUTFS_GIT_DESCRIBE=%{kmod_git_describe} SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
|
||||
done
|
||||
|
||||
%install
|
||||
@@ -66,7 +92,7 @@ export INSTALL_MOD_DIR=%{install_mod_dir}
|
||||
mkdir -p %{install_mod_dir}
|
||||
for flavor in %{flavors_to_build}; do
|
||||
export KSRC=%{kernel_source $flavor}
|
||||
export KVERSION=%{kernel_release $KSRC}
|
||||
export KVERSION=%{kversion}
|
||||
install -d $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}
|
||||
cp $PWD/obj/$flavor/src/scoutfs.ko $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}/
|
||||
done
|
||||
@@ -74,7 +100,23 @@ done
|
||||
# mark modules executable so that strip-to-file can strip them
|
||||
find %{buildroot} -type f -name \*.ko -exec %{__chmod} u+x \{\} \;
|
||||
|
||||
%if ! 0%{?el7}
|
||||
%files
|
||||
/lib/modules
|
||||
|
||||
%post
|
||||
echo /lib/modules/%{kversion}/%{install_mod_dir}/scoutfs.ko | weak-modules --add-modules --no-initramfs
|
||||
depmod -a
|
||||
%endif
|
||||
|
||||
%clean
|
||||
rm -rf %{buildroot}
|
||||
|
||||
%preun
|
||||
# stash our modules for postun cleanup
|
||||
SCOUTFS_RPM_NAME=$(rpm -q %{name} | grep "%{version}-%{release}")
|
||||
rpm -ql $SCOUTFS_RPM_NAME | grep '\.ko$' > /var/run/%{name}-modules-%{version}-%{release} || true
|
||||
|
||||
%postun
|
||||
cat /var/run/%{name}-modules-%{version}-%{release} | weak-modules --remove-modules --no-initramfs
|
||||
rm /var/run/%{name}-modules-%{version}-%{release} || true
|
||||
|
||||
@@ -8,6 +8,8 @@ CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
|
||||
-include $(src)/Makefile.kernelcompat
|
||||
|
||||
scoutfs-y += \
|
||||
acl.o \
|
||||
attr_x.o \
|
||||
avl.o \
|
||||
alloc.o \
|
||||
block.o \
|
||||
@@ -24,6 +26,7 @@ scoutfs-y += \
|
||||
inode.o \
|
||||
ioctl.o \
|
||||
item.o \
|
||||
kernelcompat.o \
|
||||
lock.o \
|
||||
lock_server.o \
|
||||
msg.o \
|
||||
@@ -32,6 +35,7 @@ scoutfs-y += \
|
||||
options.o \
|
||||
per_task.o \
|
||||
quorum.o \
|
||||
quota.o \
|
||||
recov.o \
|
||||
scoutfs_trace.o \
|
||||
server.o \
|
||||
@@ -40,10 +44,12 @@ scoutfs-y += \
|
||||
srch.o \
|
||||
super.o \
|
||||
sysfs.o \
|
||||
totl.o \
|
||||
trans.o \
|
||||
triggers.o \
|
||||
tseq.o \
|
||||
volopt.o \
|
||||
wkic.o \
|
||||
xattr.o
|
||||
|
||||
#
|
||||
|
||||
@@ -7,23 +7,13 @@
|
||||
ccflags-y += -include $(src)/kernelcompat.h
|
||||
|
||||
#
|
||||
# v3.10-rc6-21-gbb6f619b3a49
|
||||
# v3.18-rc2-19-gb5ae6b15bd73
|
||||
#
|
||||
# Folds d_materialise_unique into d_splice_alias. Note reversal
|
||||
# of arguments (Also note Documentation/filesystems/porting.rst)
|
||||
#
|
||||
# _readdir changes from fop->readdir() to fop->iterate() and from
|
||||
# filldir(dirent) to dir_emit(ctx).
|
||||
#
|
||||
ifneq (,$(shell grep 'iterate.*dir_context' include/linux/fs.h))
|
||||
ccflags-y += -DKC_ITERATE_DIR_CONTEXT
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.10-rc6-23-g5f99f4e79abc
|
||||
#
|
||||
# Helpers including dir_emit_dots() are added in the process of
|
||||
# switching dcache_readdir() from fop->readdir() to fop->iterate()
|
||||
#
|
||||
ifneq (,$(shell grep 'dir_emit_dots' include/linux/fs.h))
|
||||
ccflags-y += -DKC_DIR_EMIT_DOTS
|
||||
ifneq (,$(shell grep 'd_materialise_unique' include/linux/dcache.h))
|
||||
ccflags-y += -DKC_D_MATERIALISE_UNIQUE=1
|
||||
endif
|
||||
|
||||
#
|
||||
@@ -34,3 +24,441 @@ endif
|
||||
ifneq (,$(shell grep 'FMODE_KABI_ITERATE' include/linux/fs.h))
|
||||
ccflags-y += -DKC_FMODE_KABI_ITERATE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.7-rc2-23-g0d4d717f2583
|
||||
#
|
||||
# Added user_ns argument to posix_acl_valid
|
||||
#
|
||||
ifneq (,$(shell grep 'posix_acl_valid.*user_namespace' include/linux/posix_acl.h))
|
||||
ccflags-y += -DKC_POSIX_ACL_VALID_USER_NS
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.3-12296-g6d2052d188d9
|
||||
#
|
||||
# The RBCOMPUTE function is now passed an extra flag, and should return a bool
|
||||
# to indicate whether the propagated callback should stop or not.
|
||||
#
|
||||
ifneq (,$(shell grep 'static inline bool RBNAME.*_compute_max' include/linux/rbtree_augmented.h))
|
||||
ccflags-y += -DKC_RB_TREE_AUGMENTED_COMPUTE_MAX
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.13-25-g37bc15392a23
|
||||
#
|
||||
# Renames posix_acl_create to __posix_acl_create and provide some
|
||||
# new interfaces for creating ACLs
|
||||
#
|
||||
ifneq (,$(shell grep '__posix_acl_create' include/linux/posix_acl.h))
|
||||
ccflags-y += -DKC___POSIX_ACL_CREATE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.8-rc1-29-g31051c85b5e2
|
||||
#
|
||||
# inode_change_ok() removed - replace with setattr_prepare()
|
||||
# v5.11-rc4-7-g2f221d6f7b88 removes extern attribute
|
||||
#
|
||||
ifneq (,$(shell grep 'int setattr_prepare' include/linux/fs.h))
|
||||
ccflags-y += -DKC_SETATTR_PREPARE
|
||||
endif
|
||||
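# Each check in this file turns into a -D flag on ccflags-y, and the C
# sources then select between old and new kernel APIs with ordinary
# preprocessor conditionals.  A minimal sketch (illustrative only, the
# wrapper name below is hypothetical) of how KC_SETATTR_PREPARE is
# typically consumed on the C side:
#
#   #ifdef KC_SETATTR_PREPARE
#   #define kc_setattr_prepare(dentry, attr) setattr_prepare(dentry, attr)
#   #else
#   /* older kernels only provide inode_change_ok() */
#   #define kc_setattr_prepare(dentry, attr) \
#           inode_change_ok((dentry)->d_inode, attr)
#   #endif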
|
||||
#
|
||||
# v4.15-rc3-4-gae5e165d855d
|
||||
#
|
||||
# linux/iversion.h needs to manually be included for code that
|
||||
# manipulates this field.
|
||||
#
|
||||
ifneq (,$(shell grep -s 'define _LINUX_IVERSION_H' include/linux/iversion.h))
|
||||
ccflags-y += -DKC_NEED_LINUX_IVERSION_H=1
|
||||
endif
|
||||
|
||||
# v4.11-12447-g104b4e5139fe
|
||||
#
|
||||
# Renamed __percpu_counter_add to percpu_counter_add_batch to clarify
|
||||
# that the __ wasn't less safe, just took an extra parameter.
|
||||
#
|
||||
ifneq (,$(shell grep 'percpu_counter_add_batch' include/linux/percpu_counter.h))
|
||||
ccflags-y += -DKC_PERCPU_COUNTER_ADD_BATCH
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.11-4550-g7dea19f9ee63
|
||||
#
|
||||
# Introduced memalloc_nofs_{save,restore} preferred instead of _noio_.
|
||||
#
|
||||
ifneq (,$(shell grep 'memalloc_nofs_save' include/linux/sched/mm.h))
|
||||
ccflags-y += -DKC_MEMALLOC_NOFS_SAVE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.7-12414-g1eff9d322a44
|
||||
#
|
||||
# Renamed bi_rw to bi_opf to force old code to catch up. We use it as a
|
||||
# single switch between old and new bio structures.
|
||||
#
|
||||
ifneq (,$(shell grep 'bi_opf' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_BIO_BI_OPF
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.12-rc2-201-g4e4cbee93d56
|
||||
#
|
||||
# Moves to bi_status BLK_STS_ API instead of having a mix of error
|
||||
# end_io args or bi_error.
|
||||
#
|
||||
ifneq (,$(shell grep 'bi_status' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_BIO_BI_STATUS
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.11-8765-ga0b02131c5fc
|
||||
#
|
||||
# Remove the old ->shrink() API, ->{scan,count}_objects is preferred.
|
||||
#
|
||||
ifneq (,$(shell grep '(*shrink)' include/linux/shrinker.h))
|
||||
ccflags-y += -DKC_SHRINKER_SHRINK
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.19-4777-g6bec00352861
|
||||
#
|
||||
# backing_dev_info is removed from address_space. Instead we need to use
|
||||
# inode_to_bdi() inline from <backing-dev.h>.
|
||||
#
|
||||
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_BACKING_DEV_INFO=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.3-9290-ge409de992e3e
|
||||
#
|
||||
# xattr handlers are now passed a struct that contains `flags`
|
||||
#
|
||||
ifneq (,$(shell grep 'int...get..const struct xattr_handler.*struct dentry.*dentry,' include/linux/xattr.h))
|
||||
ccflags-y += -DKC_XATTR_STRUCT_XATTR_HANDLER=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.16-rc1-1-g9b2c45d479d0
|
||||
#
|
||||
# kernel_getsockname() and kernel_getpeername() dropped the addrlen arg
|
||||
#
|
||||
ifneq (,$(shell grep 'kernel_getsockname.*,$$' include/linux/net.h))
|
||||
ccflags-y += -DKC_KERNEL_GETSOCKNAME_ADDRLEN=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.1-rc1-410-geeb1bd5c40ed
|
||||
#
|
||||
# Adds a struct net parameter to sock_create_kern
|
||||
#
|
||||
ifneq (,$(shell grep 'sock_create_kern.*struct net' include/linux/net.h))
|
||||
ccflags-y += -DKC_SOCK_CREATE_KERN_NET=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.17-rc6-7-g95582b008388
|
||||
#
|
||||
# Kernel has current_time(inode) to uniformly retrieve timespec in the right unit
|
||||
#
|
||||
ifneq (,$(shell grep 'struct timespec64 current_time' include/linux/fs.h))
|
||||
ccflags-y += -DKC_CURRENT_TIME_INODE=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.9-12228-g530e9b76ae8f
|
||||
#
|
||||
# register_cpu_notifier and family were all removed and to be
|
||||
# replaced with cpuhp_* API calls.
|
||||
#
|
||||
ifneq (,$(shell grep 'define register_hotcpu_notifier' include/linux/cpu.h))
|
||||
ccflags-y += -DKC_CPU_NOTIFIER
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.14-rc8-130-gccad2365668f
|
||||
#
|
||||
# generic_file_buffered_write is removed, backport it
|
||||
#
|
||||
ifneq (,$(shell grep 'extern ssize_t generic_file_buffered_write' include/linux/fs.h))
|
||||
ccflags-y += -DKC_GENERIC_FILE_BUFFERED_WRITE=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.7-438-g8151b4c8bee4
|
||||
#
|
||||
# struct address_space_operations switches away from .readpages to .readahead
|
||||
#
|
||||
# RHEL has backported this feature all the way to RHEL8, as part of RHEL_KABI,
|
||||
# which means we need to detect this very precisely
|
||||
#
|
||||
ifneq (,$(shell grep 'readahead.*struct readahead_control' include/linux/fs.h))
|
||||
ccflags-y += -DKC_FILE_AOPS_READAHEAD
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.0-rc7-1743-g8436318205b9
|
||||
#
|
||||
# .aio_read and .aio_write no longer exist. All reads and writes now use the
|
||||
# .read_iter and .write_iter methods, or must implement .read and .write (which
|
||||
# we don't).
|
||||
#
|
||||
ifneq (,$(shell grep 'ssize_t.*aio_read' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_HAVE_FOP_AIO_READ=1
|
||||
endif
|
||||
|
||||
#
|
||||
# rhel7 has a custom inode_operations_wrapper struct that is discarded
|
||||
# entirely in favor of upstream structure since rhel8.
|
||||
#
|
||||
ifneq (,$(shell grep 'void.*follow_link.*struct dentry' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_HAVE_RHEL_IOPS_WRAPPER=1
|
||||
endif
|
||||
|
||||
ifneq (,$(shell grep 'size_t.*ki_left;' include/linux/aio.h))
|
||||
ccflags-y += -DKC_LINUX_AIO_KI_LEFT=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.4-rc4-4-g98e9cb5711c6
|
||||
#
|
||||
# Introduces a new xattr_handler .name member that can be used to match the
|
||||
# entire field, instead of just a prefix. For these kernels, we must use
|
||||
# the new .name field instead.
|
||||
ifneq (,$(shell grep 'static inline const char .xattr_prefix' include/linux/xattr.h))
|
||||
ccflags-y += -DKC_XATTR_HANDLER_NAME=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.19-rc4-96-g342a72a33407
|
||||
#
|
||||
# Adds `typedef __u32 __bitwise blk_opf_t` to aid flag checking
|
||||
ifneq (,$(shell grep 'typedef __u32 __bitwise blk_opf_t' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_HAVE_BLK_OPF_T=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.12-rc6-9-g4f0f586bf0c8
|
||||
#
|
||||
# list_sort cmp function takes const list_head args
|
||||
ifneq (,$(shell grep 'const struct list_head ., const struct list_head .' include/linux/list_sort.h))
|
||||
ccflags-y += -DKC_LIST_CMP_CONST_ARG_LIST_HEAD
|
||||
endif
|
||||
|
||||
# v5.7-523-g88dca4ca5a93
|
||||
#
|
||||
# The pgprot argument to vmalloc is always PAGE_KERNEL, so it is removed.
|
||||
ifneq (,$(shell grep 'extern void .__vmalloc.unsigned long size, gfp_t gfp_mask, pgprot_t prot' include/linux/vmalloc.h))
|
||||
ccflags-y += -DKC_VMALLOC_PGPROT_T
|
||||
endif
|
||||
|
||||
# v6.2-rc1-18-g01beba7957a2
|
||||
#
|
||||
# fs: port inode_owner_or_capable() to mnt_idmap
|
||||
ifneq (,$(shell grep 'bool inode_owner_or_capable.struct user_namespace .mnt_userns' include/linux/fs.h))
|
||||
ccflags-y += -DKC_INODE_OWNER_OR_CAPABLE_USERNS
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.11-rc4-5-g47291baa8ddf
|
||||
#
|
||||
# namei: make permission helpers idmapped mount aware
|
||||
ifneq (,$(shell grep 'int inode_permission.struct user_namespace' include/linux/fs.h))
|
||||
ccflags-y += -DKC_INODE_PERMISSION_USERNS
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.11-rc4-24-g549c7297717c
|
||||
#
|
||||
# fs: make helpers idmap mount aware
|
||||
# Enlarges the VFS API methods to include user namespace argument.
|
||||
ifneq (,$(shell grep 'int ..mknod. .struct user_namespace' include/linux/fs.h))
|
||||
ccflags-y += -DKC_VFS_METHOD_USER_NAMESPACE_ARG
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.17-rc2-21-g07888c665b40
|
||||
#
|
||||
# Detect new style bio_alloc - pass bdev and opf.
|
||||
ifneq (,$(shell grep 'struct bio .bio_alloc.struct block_device .bdev' include/linux/bio.h))
|
||||
ccflags-y += -DKC_BIO_ALLOC_DEV_OPF_ARGS
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.7-rc4-53-gcddf8a2c4a82
|
||||
#
|
||||
# fiemap_prep() replaces fiemap_check_flags()
|
||||
ifneq (,$(shell grep -s 'int fiemap_prep.struct inode' include/linux/fiemap.h))
|
||||
ccflags-y += -DKC_FIEMAP_PREP
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.17-13043-g800ba29547e1
|
||||
#
|
||||
# generic_perform_write args use kiocb for passing filp and pos
|
||||
ifneq (,$(shell grep 'ssize_t generic_perform_write.struct kiocb ., struct iov_iter' include/linux/fs.h))
|
||||
ccflags-y += -DKC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.7-rc6-2496-g76ee0785f42a
|
||||
#
|
||||
# net: add sock_set_sndtimeo
|
||||
ifneq (,$(shell grep 'void sock_set_sndtimeo.struct sock' include/net/sock.h))
|
||||
ccflags-y += -DKC_SOCK_SET_SNDTIMEO
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.8-rc4-1931-gba423fdaa589
|
||||
#
|
||||
# setsockopt functions are now passed a sockptr_t value instead of char*
|
||||
ifneq (,$(shell grep -s 'include .linux/sockptr.h.' include/linux/net.h))
|
||||
ccflags-y += -DKC_SETSOCKOPT_SOCKPTR_T
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.7-rc6-2507-g71c48eb81c9e
|
||||
#
|
||||
# Adds a bunch of low level TCP sock parameter functions that we want to use.
|
||||
ifneq (,$(shell grep 'int tcp_sock_set_keepintvl' include/linux/tcp.h))
|
||||
ccflags-y += -DKC_HAVE_TCP_SET_SOCKFN
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.16-rc3-13-ga84d1169164b
|
||||
#
|
||||
# Fixes y2038 issues with struct timeval.
|
||||
ifneq (,$(shell grep -s '^struct __kernel_old_timeval .' include/uapi/linux/time_types.h))
|
||||
ccflags-y += -DKC_KERNEL_OLD_TIMEVAL_STRUCT
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.19-rc4-52-ge33c267ab70d
|
||||
#
|
||||
# register_shrinker now requires a name, used for debug stats etc.
|
||||
ifneq (,$(shell grep 'int __printf.*register_shrinker.struct shrinker .shrinker,' include/linux/shrinker.h))
|
||||
ccflags-y += -DKC_SHRINKER_NAME
|
||||
endif
|
||||
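# As with the other checks, KC_SHRINKER_NAME lets a single call site serve
# both register_shrinker() signatures.  A hedged sketch (the wrapper is
# hypothetical; the two upstream signatures are as shown):
#
#   #ifdef KC_SHRINKER_NAME
#   /* v5.19+: register_shrinker(struct shrinker *, const char *fmt, ...) */
#   #define kc_register_shrinker(s, name) register_shrinker((s), "%s", (name))
#   #else
#   #define kc_register_shrinker(s, name) register_shrinker(s)
#   #endif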
|
||||
#
|
||||
# v5.18-rc5-246-gf132ab7d3ab0
|
||||
#
|
||||
# mpage_readpage() is now replaced with mpage_read_folio.
|
||||
ifneq (,$(shell grep 'int mpage_read_folio.struct folio .folio' include/linux/mpage.h))
|
||||
ccflags-y += -DKC_MPAGE_READ_FOLIO
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.18-rc5-219-gb3992d1e2ebc
|
||||
#
|
||||
# block_write_begin() no longer is being passed aop_flags
|
||||
ifneq (,$(shell grep -C1 'int block_write_begin' include/linux/buffer_head.h | tail -n 2 | grep 'unsigned flags'))
|
||||
ccflags-y += -DKC_BLOCK_WRITE_BEGIN_AOP_FLAGS
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.0-rc6-9-g863f144f12ad
|
||||
#
|
||||
# the .tmpfile() vfs method calling convention changed and now a struct
|
||||
# file* is passed to this method instead of a dentry. The function also
|
||||
# should open the created file and call finish_open_simple() before returning.
|
||||
ifneq (,$(shell grep 'extern void d_tmpfile.struct dentry' include/linux/dcache.h))
|
||||
ccflags-y += -DKC_D_TMPFILE_DENTRY
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.4-rc2-201-g0733ad800291
|
||||
#
|
||||
# New blk_mode_t replaces abuse of fmode_t
|
||||
ifneq (,$(shell grep 'typedef unsigned int __bitwise blk_mode_t' include/linux/blkdev.h))
|
||||
ccflags-y += -DKC_HAVE_BLK_MODE_T
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.4-rc2-186-g2736e8eeb0cc
|
||||
#
|
||||
# Reworks FMODE_EXCL kludge and instead modifies the blkdev_put() call to pass in
|
||||
# the (exclusive) holder to implement FMODE_EXCL handling.
|
||||
ifneq (,$(shell grep 'blkdev_put.struct block_device .bdev, void .holder' include/linux/blkdev.h))
|
||||
ccflags-y += -DKC_BLKDEV_PUT_HOLDER_ARG
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.4-rc4-163-g0d625446d0a4
|
||||
#
|
||||
# Entirely removes current->backing_dev_info to ultimately remove buffer_head
|
||||
# completely at some point.
|
||||
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info;' include/linux/sched.h))
|
||||
ccflags-y += -DKC_CURRENT_BACKING_DEV_INFO
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.8-rc1-4-gf3a608827d1f
|
||||
#
|
||||
# adds bdev_file_open_by_path() and later in v6.8-rc1-30-ge97d06a46526 removes bdev_open_by_path()
|
||||
# which requires us to use the file method from now on.
|
||||
ifneq (,$(shell grep 'struct file.*bdev_file_open_by_path.const char.*path' include/linux/blkdev.h))
|
||||
ccflags-y += -DKC_BDEV_FILE_OPEN_BY_PATH
|
||||
endif
|
||||
|
||||
# v4.0-rc7-1796-gfe0f07d08ee3
|
||||
#
|
||||
# direct-io changes modify inode_dio_done to now be called inode_dio_end
|
||||
ifneq (,$(shell grep 'void inode_dio_end.struct inode' include/linux/fs.h))
|
||||
ccflags-y += -DKC_INODE_DIO_END
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.0-6476-g3d3539018d2c
|
||||
#
|
||||
# page fault handlers return a bitmask vm_fault_t instead
|
||||
# Note: el8's header has a slightly modified prefix here
|
||||
ifneq (,$(shell grep 'typedef.*__bitwise unsigned.*int vm_fault_t' include/linux/mm_types.h))
|
||||
ccflags-y += -DKC_MM_VM_FAULT_T
|
||||
endif
|
||||
|
||||
# v3.19-499-gd83a08db5ba6
|
||||
#
|
||||
# .remap_pages becomes obsolete
|
||||
ifneq (,$(shell grep 'int ..remap_pages..struct vm_area_struct' include/linux/mm.h))
|
||||
ccflags-y += -DKC_MM_REMAP_PAGES
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.19-4742-g503c358cf192
|
||||
#
|
||||
# list_lru_shrink_count() and list_lru_shrink_walk() introduced
|
||||
#
|
||||
ifneq (,$(shell grep 'list_lru_shrink_count.*struct list_lru' include/linux/list_lru.h))
|
||||
ccflags-y += -DKC_LIST_LRU_SHRINK_COUNT_WALK
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.19-4757-g3f97b163207c
|
||||
#
|
||||
# lru_list_walk_cb lru arg added
|
||||
#
|
||||
ifneq (,$(shell grep 'struct list_head \*item, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
|
||||
ccflags-y += -DKC_LIST_LRU_WALK_CB_ITEM_LOCK
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.7-rc4-153-g0a97c01cd20b
|
||||
#
|
||||
# list_lru_{add,del} -> list_lru_{add,del}_obj
|
||||
#
|
||||
ifneq (,$(shell grep '^bool list_lru_add_obj' include/linux/list_lru.h))
|
||||
ccflags-y += -DKC_LIST_LRU_ADD_OBJ
|
||||
endif
|
||||
|
||||
#
|
||||
# v6.12-rc6-227-gda0c02516c50
|
||||
#
|
||||
# lru_list_walk_cb lock arg removed
|
||||
#
|
||||
ifneq (,$(shell grep 'struct list_lru_one \*list, spinlock_t \*lock, void \*cb_arg' include/linux/list_lru.h))
|
||||
ccflags-y += -DKC_LIST_LRU_WALK_CB_LIST_LOCK
|
||||
endif
|
||||
|
||||
|
||||
kmod/src/acl.c (new file, 377 lines)
@@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (C) 2022 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/posix_acl.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "xattr.h"
|
||||
#include "acl.h"
|
||||
#include "inode.h"
|
||||
#include "trans.h"
|
||||
|
||||
/*
|
||||
* POSIX draft ACLs are stored as full xattr items with the entries
|
||||
* encoded as the kernel's posix_acl_xattr_{header,entry} value structs.
|
||||
*
|
||||
* They're accessed and modified via user facing synthetic xattrs, iops
|
||||
* calls from the kernel, during inode mode changes, and during inode
|
||||
* creation.
|
||||
*
|
||||
* ACL access devolves into xattr access which is relatively expensive
|
||||
* so we maintain the cached native form in the vfs inode. We drop the
|
||||
* cache in lock invalidation which means that cached acl access must
|
||||
* always be performed under cluster locking.
|
||||
*/
|
||||
|
||||
static int acl_xattr_name_len(int type, char **name, size_t *name_len)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
*name = XATTR_NAME_POSIX_ACL_ACCESS;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
*name = XATTR_NAME_POSIX_ACL_DEFAULT;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
char *value = NULL;
|
||||
char *name;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
|
||||
acl = get_cached_acl(inode, type);
|
||||
if (acl != ACL_NOT_CACHED)
|
||||
return acl;
|
||||
#endif
|
||||
|
||||
ret = acl_xattr_name_len(type, &name, NULL);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = scoutfs_xattr_get_locked(inode, name, NULL, 0, lock);
|
||||
if (ret > 0) {
|
||||
value = kzalloc(ret, GFP_NOFS);
|
||||
if (!value)
|
||||
ret = -ENOMEM;
|
||||
else
|
||||
ret = scoutfs_xattr_get_locked(inode, name, value, ret, lock);
|
||||
}
|
||||
if (ret > 0) {
|
||||
acl = posix_acl_from_xattr(&init_user_ns, value, ret);
|
||||
} else if (ret == -ENODATA || ret == 0) {
|
||||
acl = NULL;
|
||||
} else {
|
||||
acl = ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/* can set null negative cache */
|
||||
if (!IS_ERR(acl))
|
||||
set_cached_acl(inode, type, acl);
|
||||
|
||||
kfree(value);
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
struct posix_acl *acl;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
#endif
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
|
||||
if (ret < 0) {
|
||||
acl = ERR_PTR(ret);
|
||||
} else {
|
||||
acl = scoutfs_get_acl_locked(inode, type, lock);
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
|
||||
}
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
/*
 * The caller has acquired the locks and dirtied the inode; they'll
 * update the inode item if we return 0.
 */
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
			   struct scoutfs_lock *lock, struct list_head *ind_locks)
{
	static const struct scoutfs_xattr_prefix_tags tgs = {0,}; /* never scoutfs. prefix */
	bool set_mode = false;
	char *value = NULL;
	umode_t new_mode;
	size_t name_len;
	char *name;
	int size = 0;
	int ret;

	ret = acl_xattr_name_len(type, &name, &name_len);
	if (ret < 0)
		return ret;

	switch (type) {
	case ACL_TYPE_ACCESS:
		if (acl) {
			ret = posix_acl_update_mode(KC_VFS_INIT_NS
						    inode, &new_mode, &acl);
			if (ret < 0)
				goto out;
			set_mode = true;
		}
		break;
	case ACL_TYPE_DEFAULT:
		if (!S_ISDIR(inode->i_mode)) {
			ret = acl ? -EINVAL : 0;
			goto out;
		}
		break;
	}

	if (acl) {
		size = posix_acl_xattr_size(acl->a_count);
		value = kmalloc(size, GFP_NOFS);
		if (!value) {
			ret = -ENOMEM;
			goto out;
		}

		ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
		if (ret < 0)
			goto out;
	}

	ret = scoutfs_xattr_set_locked(inode, name, name_len, value, size, 0, &tgs,
				       lock, NULL, ind_locks);
	if (ret == 0 && set_mode) {
		inode->i_mode = new_mode;
		if (!value) {
			/* the acl may only affect the mode and not need an xattr update */
			inode_inc_iversion(inode);
			inode->i_ctime = current_time(inode);
		}
	}

out:
	if (!ret)
		set_cached_acl(inode, type, acl);

	kfree(value);

	return ret;
}

int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
	int ret;

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock) ?:
	      scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
	if (ret == 0) {
		ret = scoutfs_dirty_inode_item(inode, lock) ?:
		      scoutfs_set_acl_locked(inode, acl, type, lock, &ind_locks);
		if (ret == 0)
			scoutfs_update_inode_item(inode, lock, &ind_locks);

		scoutfs_release_trans(sb);
		scoutfs_inode_index_unlock(sb, &ind_locks);
	}

	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
	return ret;
}
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_get_xattr(const struct xattr_handler *handler, struct dentry *dentry,
			  struct inode *inode, const char *name, void *value,
			  size_t size)
{
	int type = handler->flags;
#else
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
			  int type)
{
#endif
	struct posix_acl *acl;
	int ret = 0;

	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

	acl = scoutfs_get_acl(dentry->d_inode, type);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;

	ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
	posix_acl_release(acl);

	return ret;
}

#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_set_xattr(const struct xattr_handler *handler,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags)
{
	int type = handler->flags;
#else
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
			  int flags, int type)
{
#endif
	struct posix_acl *acl = NULL;
	int ret;

	if (!inode_owner_or_capable(KC_VFS_INIT_NS dentry->d_inode))
		return -EPERM;

	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

	if (value) {
		acl = posix_acl_from_xattr(&init_user_ns, value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);

		if (acl) {
			ret = kc_posix_acl_valid(&init_user_ns, acl);
			if (ret)
				goto out;
		}
	}

	ret = scoutfs_set_acl(dentry->d_inode, acl, type);
out:
	posix_acl_release(acl);

	return ret;
}

/*
 * Apply the parent's default acl to a new inode's access acl and inherit
 * it as the default for new directories.  The caller holds locks and a
 * transaction.
 */
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
			    struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
			    struct list_head *ind_locks)
{
	struct posix_acl *acl = NULL;
	int ret = 0;

	if (!S_ISLNK(inode->i_mode)) {
		if (IS_POSIXACL(dir)) {
			acl = scoutfs_get_acl_locked(dir, ACL_TYPE_DEFAULT, dir_lock);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		}

		if (!acl)
			inode->i_mode &= ~current_umask();
	}

	if (IS_POSIXACL(dir) && acl) {
		if (S_ISDIR(inode->i_mode)) {
			ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_DEFAULT,
						     lock, ind_locks);
			if (ret)
				goto out;
		}
		ret = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
		if (ret < 0)
			return ret;
		if (ret > 0)
			ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS,
						     lock, ind_locks);
	} else {
		cache_no_acl(inode);
	}
out:
	posix_acl_release(acl);
	return ret;
}

/*
 * Update the access ACL based on a newly set mode.  If we return an
 * error then the xattr wasn't changed.
 *
 * Annoyingly, setattr_copy applies a transformation to the final mode
 * that we'd want to use when updating the acl, but we don't want to
 * modify the other inode fields just to discover the resulting mode.
 * We rely on acl_chmod not caring about that transformation (it
 * currently just clears sgid).  It would be better if we could get the
 * resulting mode for acl_chmod without modifying the other inode fields.
 *
 * The caller holds the inode mutex, a cluster lock, and a transaction,
 * and will update the inode item if we return success.
 */
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
			     struct scoutfs_lock *lock, struct list_head *ind_locks)
{
	struct posix_acl *acl;
	int ret = 0;

	if (!IS_POSIXACL(inode) || !(attr->ia_valid & ATTR_MODE))
		return 0;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	acl = scoutfs_get_acl_locked(inode, ACL_TYPE_ACCESS, lock);
	if (IS_ERR_OR_NULL(acl))
		return PTR_ERR(acl);

	ret = __posix_acl_chmod(&acl, GFP_KERNEL, attr->ia_mode);
	if (ret)
		return ret;

	ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS, lock, ind_locks);
	posix_acl_release(acl);
	return ret;
}
kmod/src/acl.h | 29 (new file)
@@ -0,0 +1,29 @@
#ifndef _SCOUTFS_ACL_H_
#define _SCOUTFS_ACL_H_

struct posix_acl *scoutfs_get_acl(struct inode *inode, int type);
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
			   struct scoutfs_lock *lock, struct list_head *ind_locks);
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
int scoutfs_acl_get_xattr(const struct xattr_handler *, struct dentry *dentry,
			  struct inode *inode, const char *name, void *value,
			  size_t size);
int scoutfs_acl_set_xattr(const struct xattr_handler *,
			  KC_VFS_NS_DEF
			  struct dentry *dentry,
			  struct inode *inode, const char *name, const void *value,
			  size_t size, int flags);
#else
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
			  int type);
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
			  int flags, int type);
#endif
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
			     struct scoutfs_lock *lock, struct list_head *ind_locks);
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
			    struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
			    struct list_head *ind_locks);
#endif
kmod/src/alloc.c | 152
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/sort.h>
 #include <linux/random.h>

@@ -84,6 +85,50 @@ static u64 smallest_order_length(u64 len)
	return 1ULL << (free_extent_order(len) * 3);
}

/*
 * Moving an extent between trees can dirty blocks in several ways.  This
 * function calculates the worst-case number of blocks across these scenarios.
 * We treat the alloc and free counts independently, so the values below are
 * max(allocated, freed), not the sum.
 *
 * We track extents with two separate btree items: by block number and by size.
 *
 * If we're removing an extent from the btree (allocating), we can dirty
 * two blocks if the keys are in different leaves.  If we wind up merging
 * leaves because we fall below the low water mark, we can wind up freeing
 * three leaves.
 *
 * That sequence is as follows, assuming the original keys are removed from
 * blocks A and B:
 *
 *   Allocate new dirty A' and B'
 *   Free old stable A and B
 *   B' has fallen below the low water mark, so copy B' into A'
 *   Free B'
 *
 * An extent insertion (freeing an extent) can dirty up to five distinct items
 * in the btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent:
 *
 * In the by-blkno portion of the btree, we can dirty (allocate for COW) up
 * to two blocks: either by merging adjacent extents, which can cause us to
 * join leaf blocks, or by an insertion that causes a split.
 *
 * In the by-size portion, we never merge extents, so normally we just dirty
 * a single item with a size insertion.  But if we merged adjacent extents in
 * the by-blkno portion of the tree, we might be working with three by-size
 * items: removing the two old ones that were combined in the merge, and
 * adding the new one for the larger, merged size.
 *
 * Finally, dirtying the paths to these leaves can grow the tree and grow/shrink
 * neighbours at each level, so we multiply by the height of the tree after
 * accounting for a possible new level.
 */
static u32 extent_mod_blocks(u32 height)
{
	return ((1 + height) * 3) * 5;
}

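As a quick sanity check of the arithmetic in that comment, here is a minimal userspace sketch (not part of the patch) that evaluates the same worst-case formula for a few tree heights:

```c
#include <stdio.h>

/* same worst-case formula as extent_mod_blocks() above: up to 5 dirty
 * items, each path touching (1 + height) blocks, times 3 for the
 * grow/shrink of neighbours at each level */
static unsigned int extent_mod_blocks(unsigned int height)
{
	return ((1 + height) * 3) * 5;
}

int main(void)
{
	unsigned int h;

	for (h = 1; h <= 4; h++)
		printf("height %u -> up to %u dirty blocks\n", h, extent_mod_blocks(h));
	/* prints 30, 45, 60, 75 */
	return 0;
}
```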
/*
 * Free extents don't have flags and are stored in two indexes sorted by
 * block location and by length order, largest first.  The location key
@@ -877,6 +922,13 @@ static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *r
 * -ENOENT is returned if we run out of extents in the source tree
 * before moving the total.
 *
 * If meta_budget is non-zero then -EINPROGRESS can be returned if the
 * caller's budget is consumed in the allocator during this call
 * (though not necessarily by us, we don't have per-thread tracking of
 * allocator consumption :/).  The call can still have made progress and
 * the caller is expected to commit the dirty trees and examine the
 * resulting modified trees to see if they need to continue moving extents.
 *
 * The caller can specify that extents in the source tree should first
 * be found based on their zone bitmaps.  We'll first try to find
 * extents in the exclusive zones, then vacant zones, and then we'll
@@ -891,7 +943,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks)
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget)
{
	struct alloc_ext_args args = {
		.alloc = alloc,
@@ -899,6 +951,8 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
	};
	struct scoutfs_extent found;
	struct scoutfs_extent ext;
	u32 avail_start = 0;
	u32 freed_start = 0;
	u64 moved = 0;
	u64 count;
	int ret = 0;
@@ -909,6 +963,9 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		vacant = NULL;
	}

	if (meta_budget != 0)
		scoutfs_alloc_meta_remaining(alloc, &avail_start, &freed_start);

	while (moved < total) {
		count = total - moved;

@@ -941,6 +998,24 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		if (ret < 0)
			break;

		if (meta_budget != 0 &&
		    scoutfs_alloc_meta_low_since(alloc, avail_start, freed_start, meta_budget,
						 extent_mod_blocks(src->root.height) +
						 extent_mod_blocks(dst->root.height))) {
			ret = -EINPROGRESS;
			break;
		}

		/* return partial if the server alloc can't dirty any more */
		if (scoutfs_alloc_meta_low(sb, alloc, 50 + extent_mod_blocks(src->root.height) +
					   extent_mod_blocks(dst->root.height))) {
			if (WARN_ON_ONCE(!moved))
				ret = -ENOSPC;
			else
				ret = 0;
			break;
		}

		/* searching set start/len, finish initializing alloced extent */
		ext.map = found.map ? ext.start - found.start + found.map : 0;
		ext.flags = found.flags;
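A hedged sketch of how a caller might drive the budgeted behaviour described in the comment above; commit_dirty_trees() is a hypothetical stand-in for the caller's commit path, not a function from the patch:

```c
/* sketch only: handle the budgeted return codes of scoutfs_alloc_move() */
static int move_with_budget(struct super_block *sb, struct scoutfs_alloc *alloc,
			    struct scoutfs_block_writer *wri,
			    struct scoutfs_alloc_root *dst,
			    struct scoutfs_alloc_root *src,
			    u64 total, u64 meta_budget)
{
	int ret;

	ret = scoutfs_alloc_move(sb, alloc, wri, dst, src, total,
				 NULL, NULL, 0, meta_budget);
	if (ret == -EINPROGRESS) {
		/* budget consumed: commit the dirty trees, then examine the
		 * resulting trees and decide whether to call again */
		ret = commit_dirty_trees(sb, wri);	/* hypothetical helper */
	} else if (ret == -ENOENT) {
		/* the source tree ran out of extents before moving the total */
	}

	return ret;
}
```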
@@ -1065,15 +1140,6 @@ out:
 * than completely exhausting the avail list or overflowing the freed
 * list.
 *
 * An extent modification dirties three distinct leaves of an allocator
 * btree as it adds and removes the blkno and size sorted items for the
 * old and new lengths of the extent.  Dirtying the paths to these
 * leaves can grow the tree and grow/shrink neighbours at each level.
 * We over-estimate the number of blocks allocated and freed (the paths
 * share a root, growth doesn't free) to err on the simpler and safer
 * side.  The overhead is minimal given the relatively large list blocks
 * and relatively short allocator trees.
 *
 * The caller tells us how many extents they're about to modify and how
 * many other additional blocks they may cow manually.  And finally, the
 * caller could be the first to dirty the avail and freed blocks in the
@@ -1082,7 +1148,7 @@ out:
static bool list_has_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
			    struct scoutfs_alloc_root *root, u32 extents, u32 addl_blocks)
{
	u32 tree_blocks = (((1 + root->root.height) * 2) * 3) * extents;
	u32 tree_blocks = extent_mod_blocks(root->root.height) * extents;
	u32 most = 1 + tree_blocks + addl_blocks;

	if (le32_to_cpu(alloc->avail.first_nr) < most) {
@@ -1318,6 +1384,38 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
	return lo;
}

void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&alloc->seqlock);
		*avail_total = le32_to_cpu(alloc->avail.first_nr);
		*freed_space = list_block_space(alloc->freed.first_nr);
	} while (read_seqretry(&alloc->seqlock, seq));
}

/*
 * Returns true if the caller's consumption of nr from either avail or
 * freed would end up exceeding their budget relative to the starting
 * remaining snapshot they took.
 */
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
				  u32 budget, u32 nr)
{
	u32 avail_use;
	u32 freed_use;
	u32 avail;
	u32 freed;

	scoutfs_alloc_meta_remaining(alloc, &avail, &freed);

	avail_use = avail_start - avail;
	freed_use = freed_start - freed;

	return ((avail_use + nr) > budget) || ((freed_use + nr) > budget);
}

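A small worked example of the check above, with made-up numbers: if the snapshot taken at the start of the move recorded avail_start = 100 and freed_start = 80, and the current remaining counts read back as 90 and 70, then 10 blocks have been consumed on each side. With budget = 20 and a proposed nr = 15, either side would reach 10 + 15 = 25 > 20, so the function returns true and scoutfs_alloc_move() stops early with -EINPROGRESS.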
bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag)
{
@@ -1514,12 +1612,10 @@ out:
 * call the caller's callback.  This assumes that the super it's reading
 * could be stale and will retry if it encounters stale blocks.
 */
int scoutfs_alloc_foreach(struct super_block *sb,
			  scoutfs_alloc_foreach_cb_t cb, void *arg)
int scoutfs_alloc_foreach(struct super_block *sb, scoutfs_alloc_foreach_cb_t cb, void *arg)
{
	struct scoutfs_super_block *super = NULL;
	struct scoutfs_block_ref stale_refs[2] = {{0,}};
	struct scoutfs_block_ref refs[2] = {{0,}};
	DECLARE_SAVED_REFS(saved);
	int ret;

	super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
@@ -1528,26 +1624,18 @@ int scoutfs_alloc_foreach(struct super_block *sb,
		goto out;
	}

retry:
	ret = scoutfs_read_super(sb, super);
	if (ret < 0)
		goto out;
	do {
		ret = scoutfs_read_super(sb, super);
		if (ret < 0)
			goto out;

	refs[0] = super->logs_root.ref;
	refs[1] = super->srch_root.ref;
		ret = scoutfs_alloc_foreach_super(sb, super, cb, arg);

		ret = scoutfs_block_check_stale(sb, ret, &saved, &super->logs_root.ref,
						&super->srch_root.ref);
	} while (ret == -ESTALE);

	ret = scoutfs_alloc_foreach_super(sb, super, cb, arg);
out:
	if (ret == -ESTALE) {
		if (memcmp(&stale_refs, &refs, sizeof(refs)) == 0) {
			ret = -EIO;
		} else {
			BUILD_BUG_ON(sizeof(stale_refs) != sizeof(refs));
			memcpy(stale_refs, refs, sizeof(stale_refs));
			goto retry;
		}
	}

	kfree(super);
	return ret;
}

@@ -19,14 +19,11 @@
	(128ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/*
 * The largest aligned region that we'll try to allocate at the end of
 * the file as it's extended.  This is also limited to the current file
 * size so we can only waste at most twice the total file size when
 * files are less than this.  We try to keep this around the point of
 * diminishing returns in streaming performance of common data devices
 * to limit waste.
 * The default size that we'll try to preallocate.  This is trying to
 * hit the limit of large efficient device writes while minimizing
 * wasted preallocation that is never used.
 */
#define SCOUTFS_DATA_EXTEND_PREALLOC_LIMIT \
#define SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS \
	(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)

/*
@@ -131,7 +128,7 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
		       struct scoutfs_block_writer *wri,
		       struct scoutfs_alloc_root *dst,
		       struct scoutfs_alloc_root *src, u64 total,
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks);
		       __le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget);
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
			 struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
			 u64 start, u64 len);
@@ -158,6 +155,9 @@ int scoutfs_alloc_splice_list(struct super_block *sb,

bool scoutfs_alloc_meta_low(struct super_block *sb,
			    struct scoutfs_alloc *alloc, u32 nr);
void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space);
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
				  u32 budget, u32 nr);
bool scoutfs_alloc_test_flag(struct super_block *sb,
			     struct scoutfs_alloc *alloc, u32 flag);

kmod/src/attr_x.c | 252 (new file)
@@ -0,0 +1,252 @@
/*
 * Copyright (C) 2024 Versity Software, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/fs.h>

#include "format.h"
#include "super.h"
#include "inode.h"
#include "ioctl.h"
#include "lock.h"
#include "trans.h"
#include "attr_x.h"

static int validate_attr_x_input(struct super_block *sb, struct scoutfs_ioctl_inode_attr_x *iax)
{
	int ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX__UNKNOWN) ||
	    (iax->x_flags & SCOUTFS_IOC_IAX_F__UNKNOWN))
		return -EINVAL;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_RETENTION)))
		return ret;

	if ((iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID) &&
	    (ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID)))
		return ret;

	return 0;
}

/*
 * If the mask indicates interest in the given attr then set the field
 * to the caller's value and return the new size if it didn't already
 * include the attr field.
 */
#define fill_attr(size, iax, bit, field, val)						\
({											\
	__typeof__(iax) _iax = (iax);							\
	__typeof__(size) _size = (size);						\
											\
	if (_iax->x_mask & (bit)) {							\
		_iax->field = (val);							\
		_size = max(_size, offsetof(struct scoutfs_ioctl_inode_attr_x, field) +	\
			    sizeof_field(struct scoutfs_ioctl_inode_attr_x, field));	\
	}										\
											\
	_size;										\
})

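To make the size bookkeeping concrete, a hedged worked example (the offsets here are illustrative, not taken from the real ioctl header): if data_version sat at byte offset 24 of struct scoutfs_ioctl_inode_attr_x and were 8 bytes wide, a request whose x_mask includes SCOUTFS_IOC_IAX_DATA_VERSION would grow the running size to at least 32, so the response covers everything up to and including that field; attrs that aren't requested never grow the response, which is how a zero mask can legitimately return 0 bytes.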
/*
 * Returns -errno on error, or the number of bytes of the response that
 * were filled.  0 can be returned if no attributes are requested in the
 * input x_mask.
 */
int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	size_t size = 0;
	u64 offline;
	u64 online;
	u64 bits;
	int ret;

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_META_SEQ,
			 meta_seq, scoutfs_inode_meta_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_SEQ,
			 data_seq, scoutfs_inode_data_seq(inode));
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_VERSION,
			 data_version, scoutfs_inode_data_version(inode));
	if (iax->x_mask & (SCOUTFS_IOC_IAX_ONLINE_BLOCKS | SCOUTFS_IOC_IAX_OFFLINE_BLOCKS)) {
		scoutfs_inode_get_onoff(inode, &online, &offline);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_ONLINE_BLOCKS,
				 online_blocks, online);
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX_OFFLINE_BLOCKS,
				 offline_blocks, offline);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_sec, inode->i_ctime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_nsec, inode->i_ctime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_sec, si->crtime.tv_sec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_nsec, si->crtime.tv_nsec);
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_SIZE, size, i_size_read(inode));
	if (iax->x_mask & SCOUTFS_IOC_IAX__BITS) {
		bits = 0;
		if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
		    (scoutfs_inode_get_flags(inode) & SCOUTFS_INO_FLAG_RETENTION))
			bits |= SCOUTFS_IOC_IAX_B_RETENTION;
		size = fill_attr(size, iax, SCOUTFS_IOC_IAX__BITS, bits, bits);
	}
	size = fill_attr(size, iax, SCOUTFS_IOC_IAX_PROJECT_ID,
			 project_id, scoutfs_inode_get_proj(inode));

	ret = size;
unlock:
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
	inode_unlock(inode);
out:
	return ret;
}

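A minimal sketch of an in-kernel caller; only the mask bits, field names, and the function itself come from the patch, the wrapper is illustrative:

```c
/* sketch: read the size and data_version of an inode via attr_x */
static int read_size_and_version(struct inode *inode, u64 *size, u64 *dv)
{
	struct scoutfs_ioctl_inode_attr_x iax = {
		.x_mask = SCOUTFS_IOC_IAX_SIZE | SCOUTFS_IOC_IAX_DATA_VERSION,
	};
	int ret;

	ret = scoutfs_get_attr_x(inode, &iax);
	if (ret < 0)
		return ret;

	/* ret is the number of response bytes that were filled */
	*size = iax.size;
	*dv = iax.data_version;
	return 0;
}
```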
static bool valid_attr_changes(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	/* provided data_version must be non-zero */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version == 0))
		return false;

	/* can only set size or data version in new regular files */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) ||
	     (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)) &&
	    (!S_ISREG(inode->i_mode) || scoutfs_inode_data_version(inode) != 0))
		return false;

	/* must provide non-zero data_version with non-zero size */
	if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) && (iax->size > 0)) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) || (iax->data_version == 0)))
		return false;

	/* must provide non-zero size when setting offline extents to that size */
	if ((iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) &&
	    (!(iax->x_mask & SCOUTFS_IOC_IAX_SIZE) || (iax->size == 0)))
		return false;

	/* the retention bit only applies to regular files */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) && !S_ISREG(inode->i_mode))
		return false;

	return true;
}

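Putting the rules above together, a hedged example of a change set that passes valid_attr_changes() for a freshly created regular file (data_version still zero): set a non-zero size together with a non-zero data_version and ask for the offline extent to be created out to that size. Dropping the data_version bit while keeping a non-zero size would trip the third check and be rejected.

```c
/* sketch: describe an already-archived file to a brand new inode */
struct scoutfs_ioctl_inode_attr_x iax = {
	.x_mask		= SCOUTFS_IOC_IAX_SIZE | SCOUTFS_IOC_IAX_DATA_VERSION,
	.x_flags	= SCOUTFS_IOC_IAX_F_SIZE_OFFLINE,
	.size		= 1ULL << 20,	/* 1 MiB, illustrative */
	.data_version	= 1,
};
```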
int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct scoutfs_lock *lock = NULL;
	LIST_HEAD(ind_locks);
	bool set_data_seq;
	int ret;

	/* initially all setting is root only, could loosen with finer grained checks */
	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	if (iax->x_mask == 0) {
		ret = 0;
		goto out;
	}

	ret = validate_attr_x_input(sb, iax);
	if (ret < 0)
		goto out;

	inode_lock(inode);

	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
	if (ret)
		goto unlock;

	/* check for errors before making any changes */
	if (!valid_attr_changes(inode, iax)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* retention prevents modification unless also clearing retention */
	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0 && !((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
			 !(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION)))
		goto unlock;

	/* only set it so we don't see a 0 data_seq with a nonzero data_version */
	if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version > 0))
		set_data_seq = true;
	else
		set_data_seq = false;

	ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq, true);
	if (ret)
		goto unlock;

	ret = scoutfs_dirty_inode_item(inode, lock);
	if (ret < 0)
		goto release;

	/* create the offline extent first, it might fail */
	if (iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) {
		ret = scoutfs_data_init_offline_extent(inode, iax->size, lock);
		if (ret)
			goto release;
	}

	/* make all changes once they're all checked and will succeed */
	if (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)
		scoutfs_inode_set_data_version(inode, iax->data_version);
	if (iax->x_mask & SCOUTFS_IOC_IAX_SIZE)
		i_size_write(inode, iax->size);
	if (iax->x_mask & SCOUTFS_IOC_IAX_CTIME) {
		inode->i_ctime.tv_sec = iax->ctime_sec;
		inode->i_ctime.tv_nsec = iax->ctime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_CRTIME) {
		si->crtime.tv_sec = iax->crtime_sec;
		si->crtime.tv_nsec = iax->crtime_nsec;
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) {
		scoutfs_inode_set_flags(inode, ~SCOUTFS_INO_FLAG_RETENTION,
					(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION) ?
					SCOUTFS_INO_FLAG_RETENTION : 0);
	}
	if (iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID)
		scoutfs_inode_set_proj(inode, iax->project_id);

	scoutfs_update_inode_item(inode, lock, &ind_locks);
	ret = 0;
release:
	scoutfs_release_trans(sb);
unlock:
	scoutfs_inode_index_unlock(sb, &ind_locks);
	scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
	inode_unlock(inode);
out:
	return ret;
}
kmod/src/attr_x.h | 11 (new file)
@@ -0,0 +1,11 @@
#ifndef _SCOUTFS_ATTR_X_H_
#define _SCOUTFS_ATTR_X_H_

#include <linux/kernel.h>
#include <linux/fs.h>
#include "ioctl.h"

int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);
int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);

#endif
kmod/src/block.c | 563
@@ -21,6 +21,8 @@
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/list_lru.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
@@ -30,34 +32,21 @@
|
||||
#include "scoutfs_trace.h"
|
||||
#include "alloc.h"
|
||||
#include "triggers.h"
|
||||
#include "util.h"
|
||||
|
||||
/*
|
||||
* The scoutfs block cache manages metadata blocks that can be larger
|
||||
* than the page size. Callers can have their own contexts for tracking
|
||||
* dirty blocks that are written together. We pin dirty blocks in
|
||||
* memory and only checksum them all as they're all written.
|
||||
*
|
||||
* Memory reclaim is driven by maintaining two very coarse groups of
|
||||
* blocks. As we access blocks we mark them with an increasing counter
|
||||
* to discourage them from being reclaimed. We then define a threshold
|
||||
* at the current counter minus half the population. Recent blocks have
|
||||
* a counter greater than the threshold, and all other blocks with
|
||||
* counters less than it are considered older and are candidates for
|
||||
* reclaim. This results in access updates rarely modifying an atomic
|
||||
* counter as blocks need to be moved into the recent group, and shrink
|
||||
* can randomly scan blocks looking for the half of the population that
|
||||
* will be in the old group. It's reasonably effective, but is
|
||||
* particularly efficient and avoids contention between concurrent
|
||||
* accesses and shrinking.
|
||||
*/
|
||||
|
||||
struct block_info {
|
||||
struct super_block *sb;
|
||||
atomic_t total_inserted;
|
||||
atomic64_t access_counter;
|
||||
struct rhashtable ht;
|
||||
struct list_lru lru;
|
||||
wait_queue_head_t waitq;
|
||||
struct shrinker shrinker;
|
||||
KC_DEFINE_SHRINKER(shrinker);
|
||||
struct work_struct free_work;
|
||||
struct llist_head free_llist;
|
||||
};
|
||||
@@ -74,28 +63,15 @@ enum block_status_bits {
|
||||
BLOCK_BIT_PAGE_ALLOC, /* page (possibly high order) allocation */
|
||||
BLOCK_BIT_VIRT, /* mapped virt allocation */
|
||||
BLOCK_BIT_CRC_VALID, /* crc has been verified */
|
||||
BLOCK_BIT_ACCESSED, /* seen by lookup since last lru add/walk */
|
||||
};
|
||||
|
||||
/*
|
||||
* We want to tie atomic changes in refcounts to whether or not the
|
||||
* block is still visible in the hash table, so we store the hash
|
||||
* table's reference up at a known high bit. We could naturally set the
|
||||
* inserted bit through excessive refcount increments. We don't do
|
||||
* anything about that but at least warn if we get close.
|
||||
*
|
||||
* We're avoiding the high byte for no real good reason, just out of a
|
||||
* historical fear of implementations that don't provide the full
|
||||
* precision.
|
||||
*/
|
||||
#define BLOCK_REF_INSERTED (1U << 23)
|
||||
#define BLOCK_REF_FULL (BLOCK_REF_INSERTED >> 1)
|
||||
|
||||
struct block_private {
|
||||
struct scoutfs_block bl;
|
||||
struct super_block *sb;
|
||||
atomic_t refcount;
|
||||
u64 accessed;
|
||||
struct rhash_head ht_head;
|
||||
struct list_head lru_head;
|
||||
struct list_head dirty_entry;
|
||||
struct llist_node free_node;
|
||||
unsigned long bits;
|
||||
@@ -110,7 +86,7 @@ struct block_private {
|
||||
do { \
|
||||
__typeof__(bp) _bp = (bp); \
|
||||
trace_scoutfs_block_##which(_bp->sb, _bp, _bp->bl.blkno, atomic_read(&_bp->refcount), \
|
||||
atomic_read(&_bp->io_count), _bp->bits, _bp->accessed); \
|
||||
atomic_read(&_bp->io_count), _bp->bits); \
|
||||
} while (0)
|
||||
|
||||
#define BLOCK_PRIVATE(_bl) \
|
||||
@@ -118,8 +94,7 @@ do { \
|
||||
|
||||
static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
|
||||
{
|
||||
int off = offsetof(struct scoutfs_block_header, crc) +
|
||||
FIELD_SIZEOF(struct scoutfs_block_header, crc);
|
||||
int off = offsetofend(struct scoutfs_block_header, crc);
|
||||
u32 calc = crc32c(~0, (char *)hdr + off, size - off);
|
||||
|
||||
return cpu_to_le32(calc);
|
||||
@@ -128,7 +103,7 @@ static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
|
||||
static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
|
||||
{
|
||||
struct block_private *bp;
|
||||
unsigned int noio_flags;
|
||||
unsigned int nofs_flags;
|
||||
|
||||
/*
|
||||
* If we had multiple blocks per page we'd need to be a little
|
||||
@@ -156,9 +131,9 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
|
||||
* spurious reclaim-on dependencies and warnings.
|
||||
*/
|
||||
lockdep_off();
|
||||
noio_flags = memalloc_noio_save();
|
||||
bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
memalloc_noio_restore(noio_flags);
|
||||
nofs_flags = memalloc_nofs_save();
|
||||
bp->virt = kc__vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM);
|
||||
memalloc_nofs_restore(nofs_flags);
|
||||
lockdep_on();
|
||||
|
||||
if (!bp->virt) {
|
||||
@@ -175,6 +150,7 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
|
||||
bp->bl.blkno = blkno;
|
||||
bp->sb = sb;
|
||||
atomic_set(&bp->refcount, 1);
|
||||
INIT_LIST_HEAD(&bp->lru_head);
|
||||
INIT_LIST_HEAD(&bp->dirty_entry);
|
||||
set_bit(BLOCK_BIT_NEW, &bp->bits);
|
||||
atomic_set(&bp->io_count, 0);
|
||||
@@ -232,32 +208,85 @@ static void block_free_work(struct work_struct *work)
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a reference to a block while holding an existing reference.
|
||||
* Users of blocks hold a refcount. If putting a refcount drops to zero
|
||||
* then the block is freed.
|
||||
*
|
||||
* Acquiring new references and claiming the exclusive right to tear
|
||||
* down a block is built around this LIVE_REFCOUNT_BASE refcount value.
|
||||
* As blocks are initially cached they have the live base added to their
|
||||
* refcount. Lookups will only increment the refcount and return blocks
|
||||
* for reference holders while the refcount is >= than the base.
|
||||
*
|
||||
* To remove a block from the cache and eventually free it, either by
|
||||
* the lru walk in the shrinker, or by reference holders, the live base
|
||||
* is removed and turned into a normal refcount increment that will be
|
||||
* put by the caller. This can only be done once for a block, and once
|
||||
* its done lookup will not return any more references.
|
||||
*/
|
||||
#define LIVE_REFCOUNT_BASE (INT_MAX ^ (INT_MAX >> 1))
|
||||
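A minimal userspace model of the lifecycle described above, with a plain int standing in for the atomic refcount; this is only a sketch of the state transitions, not the kernel code:

```c
#include <limits.h>
#include <stdio.h>

#define LIVE_REFCOUNT_BASE (INT_MAX ^ (INT_MAX >> 1))

int main(void)
{
	int refcount = 1;			/* creator's reference */

	refcount += LIVE_REFCOUNT_BASE;		/* block_insert(): block is now live */

	if (refcount >= LIVE_REFCOUNT_BASE)	/* lookups succeed while live */
		refcount++;

	/* the first removal wins: trade the live base for one normal reference */
	if (refcount >= LIVE_REFCOUNT_BASE)
		refcount += 1 - LIVE_REFCOUNT_BASE;

	/* later lookups now fail, and each remaining holder just puts its ref */
	printf("lookup %s, refs left %d\n",
	       refcount >= LIVE_REFCOUNT_BASE ? "alive" : "dead", refcount);
	return 0;
}
```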
|
||||
/*
|
||||
* Inc the refcount while holding an incremented refcount. We can't
|
||||
* have so many individual reference holders that they pass the live
|
||||
* base.
|
||||
*/
|
||||
static void block_get(struct block_private *bp)
|
||||
{
|
||||
WARN_ON_ONCE((atomic_read(&bp->refcount) & ~BLOCK_REF_INSERTED) <= 0);
|
||||
int now = atomic_inc_return(&bp->refcount);
|
||||
|
||||
atomic_inc(&bp->refcount);
|
||||
BUG_ON(now <= 1);
|
||||
BUG_ON(now == LIVE_REFCOUNT_BASE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a reference to a block as long as it's been inserted in the hash
|
||||
* table and hasn't been removed.
|
||||
*/
|
||||
static struct block_private *block_get_if_inserted(struct block_private *bp)
|
||||
* if (*v >= u) {
|
||||
* *v += a;
|
||||
* return true;
|
||||
* }
|
||||
*/
|
||||
static bool atomic_add_unless_less(atomic_t *v, int a, int u)
|
||||
{
|
||||
int cnt;
|
||||
int c;
|
||||
|
||||
do {
|
||||
cnt = atomic_read(&bp->refcount);
|
||||
WARN_ON_ONCE(cnt & BLOCK_REF_FULL);
|
||||
if (!(cnt & BLOCK_REF_INSERTED))
|
||||
return NULL;
|
||||
c = atomic_read(v);
|
||||
if (c < u)
|
||||
return false;
|
||||
} while (atomic_cmpxchg(v, c, c + a) != c);
|
||||
|
||||
} while (atomic_cmpxchg(&bp->refcount, cnt, cnt + 1) != cnt);
|
||||
return true;
|
||||
}
|
||||
|
||||
return bp;
|
||||
static bool block_get_if_live(struct block_private *bp)
|
||||
{
|
||||
return atomic_add_unless_less(&bp->refcount, 1, LIVE_REFCOUNT_BASE);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the refcount still has the live base, subtract it and increment
|
||||
* the callers refcount that they'll put.
|
||||
*/
|
||||
static bool block_get_remove_live(struct block_private *bp)
|
||||
{
|
||||
return atomic_add_unless_less(&bp->refcount, (1 - LIVE_REFCOUNT_BASE), LIVE_REFCOUNT_BASE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Only get the live base refcount if it is the only refcount remaining.
|
||||
* This means that there are no active refcount holders and the block
|
||||
* can't be dirty or under IO, which both hold references.
|
||||
*/
|
||||
static bool block_get_remove_live_only(struct block_private *bp)
|
||||
{
|
||||
int c;
|
||||
|
||||
do {
|
||||
c = atomic_read(&bp->refcount);
|
||||
if (c != LIVE_REFCOUNT_BASE)
|
||||
return false;
|
||||
} while (atomic_cmpxchg(&bp->refcount, c, c - LIVE_REFCOUNT_BASE + 1) != c);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -289,104 +318,73 @@ static const struct rhashtable_params block_ht_params = {
|
||||
};
|
||||
|
||||
/*
|
||||
* Insert a new block into the hash table. Once it is inserted in the
|
||||
* hash table readers can start getting references. The caller may have
|
||||
* multiple refs but the block can't already be inserted.
|
||||
* Insert the block into the cache so that it's visible for lookups.
|
||||
* The caller can hold references (including for a dirty block).
|
||||
*
|
||||
* We make sure the base is added and the block is in the lru once it's
|
||||
* in the hash. If hash table insertion fails it'll be briefly visible
|
||||
* in the lru, but won't be isolated/evicted because we hold an
|
||||
* incremented refcount in addition to the live base.
|
||||
*/
|
||||
static int block_insert(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
int ret;
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount) & BLOCK_REF_INSERTED);
|
||||
|
||||
BUG_ON(atomic_read(&bp->refcount) >= LIVE_REFCOUNT_BASE);
|
||||
atomic_add(LIVE_REFCOUNT_BASE, &bp->refcount);
|
||||
smp_mb__after_atomic(); /* make sure live base is visible to list_lru walk */
|
||||
list_lru_add_obj(&binf->lru, &bp->lru_head);
|
||||
retry:
|
||||
atomic_add(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
ret = rhashtable_lookup_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
if (ret < 0) {
|
||||
atomic_sub(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
if (ret == -EBUSY) {
|
||||
/* wait for pending rebalance to finish */
|
||||
synchronize_rcu();
|
||||
goto retry;
|
||||
} else {
|
||||
atomic_sub(LIVE_REFCOUNT_BASE, &bp->refcount);
|
||||
BUG_ON(atomic_read(&bp->refcount) >= LIVE_REFCOUNT_BASE);
|
||||
list_lru_del_obj(&binf->lru, &bp->lru_head);
|
||||
}
|
||||
} else {
|
||||
atomic_inc(&binf->total_inserted);
|
||||
TRACE_BLOCK(insert, bp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u64 accessed_recently(struct block_info *binf)
|
||||
{
|
||||
return atomic64_read(&binf->access_counter) - (atomic_read(&binf->total_inserted) >> 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure that a block that is being accessed is less likely to be
|
||||
* reclaimed if it is seen by the shrinker. If the block hasn't been
|
||||
* accessed recently we update its accessed value.
|
||||
* Indicate to the lru walker that this block has been accessed since it
|
||||
* was added or last walked.
|
||||
*/
|
||||
static void block_accessed(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
|
||||
if (bp->accessed == 0 || bp->accessed < accessed_recently(binf)) {
|
||||
if (!test_and_set_bit(BLOCK_BIT_ACCESSED, &bp->bits))
|
||||
scoutfs_inc_counter(sb, block_cache_access_update);
|
||||
bp->accessed = atomic64_inc_return(&binf->access_counter);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller wants to remove the block from the hash table and has an
|
||||
* idea what the refcount should be. If the refcount does still
|
||||
* indicate that the block is hashed, and we're able to clear that bit,
|
||||
* then we can remove it from the hash table.
|
||||
* Remove the block from the cache. When this returns the block won't
|
||||
* be visible for additional references from lookup.
|
||||
*
|
||||
* The caller makes sure that it's safe to be referencing this block,
|
||||
* either with their own held reference (most everything) or by being in
|
||||
* an rcu grace period (shrink).
|
||||
*/
|
||||
static bool block_remove_cnt(struct super_block *sb, struct block_private *bp, int cnt)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
int ret;
|
||||
|
||||
if ((cnt & BLOCK_REF_INSERTED) &&
|
||||
(atomic_cmpxchg(&bp->refcount, cnt, cnt & ~BLOCK_REF_INSERTED) == cnt)) {
|
||||
|
||||
TRACE_BLOCK(remove, bp);
|
||||
ret = rhashtable_remove_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
WARN_ON_ONCE(ret); /* must have been inserted */
|
||||
atomic_dec(&binf->total_inserted);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to remove the block from the hash table as long as the refcount
|
||||
* indicates that it is still in the hash table. This can be racing
|
||||
* with normal refcount changes so it might have to retry.
|
||||
* We always try and remove from the hash table. It's safe to remove a
|
||||
* block that isn't hashed, it just returns -ENOENT.
|
||||
*
|
||||
* This is racing with the lru walk in the shrinker also trying to
|
||||
* remove idle blocks from the cache. They both try to remove the live
|
||||
* refcount base and perform their removal and put if they get it.
|
||||
*/
|
||||
static void block_remove(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
int cnt;
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
|
||||
do {
|
||||
cnt = atomic_read(&bp->refcount);
|
||||
} while ((cnt & BLOCK_REF_INSERTED) && !block_remove_cnt(sb, bp, cnt));
|
||||
}
|
||||
rhashtable_remove_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
|
||||
/*
|
||||
* Take one shot at removing the block from the hash table if it's still
|
||||
* in the hash table and the caller has the only other reference.
|
||||
*/
|
||||
static bool block_remove_solo(struct super_block *sb, struct block_private *bp)
|
||||
{
|
||||
return block_remove_cnt(sb, bp, BLOCK_REF_INSERTED | 1);
|
||||
if (block_get_remove_live(bp)) {
|
||||
list_lru_del_obj(&binf->lru, &bp->lru_head);
|
||||
block_put(sb, bp);
|
||||
}
|
||||
}
|
||||
|
||||
static bool io_busy(struct block_private *bp)
|
||||
@@ -395,37 +393,6 @@ static bool io_busy(struct block_private *bp)
|
||||
return test_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called during shutdown with no other users.
|
||||
*/
|
||||
static void block_remove_all(struct super_block *sb)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
struct rhashtable_iter iter;
|
||||
struct block_private *bp;
|
||||
|
||||
rhashtable_walk_enter(&binf->ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
|
||||
for (;;) {
|
||||
bp = rhashtable_walk_next(&iter);
|
||||
if (bp == NULL)
|
||||
break;
|
||||
if (bp == ERR_PTR(-EAGAIN))
|
||||
continue;
|
||||
|
||||
if (block_get_if_inserted(bp)) {
|
||||
block_remove(sb, bp);
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount) != 1);
|
||||
block_put(sb, bp);
|
||||
}
|
||||
}
|
||||
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&binf->total_inserted) != 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX The io_count and sb fields in the block_private are only used
|
||||
@@ -436,11 +403,10 @@ static void block_remove_all(struct super_block *sb)
|
||||
* possible. Final freeing, verifying checksums, and unlinking errored
|
||||
* blocks are all done by future users of the blocks.
|
||||
*/
|
||||
static void block_end_io(struct super_block *sb, int rw,
|
||||
static void block_end_io(struct super_block *sb, blk_opf_t opf,
|
||||
struct block_private *bp, int err)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
bool is_read = !(rw & WRITE);
|
||||
|
||||
if (err) {
|
||||
scoutfs_inc_counter(sb, block_cache_end_io_error);
|
||||
@@ -450,7 +416,7 @@ static void block_end_io(struct super_block *sb, int rw,
|
||||
if (!atomic_dec_and_test(&bp->io_count))
|
||||
return;
|
||||
|
||||
if (is_read && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
if (!op_is_write(opf) && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
set_bit(BLOCK_BIT_UPTODATE, &bp->bits);
|
||||
|
||||
clear_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
|
||||
@@ -463,13 +429,13 @@ static void block_end_io(struct super_block *sb, int rw,
|
||||
wake_up(&binf->waitq);
|
||||
}
|
||||
|
||||
static void block_bio_end_io(struct bio *bio, int err)
|
||||
static void KC_DECLARE_BIO_END_IO(block_bio_end_io, struct bio *bio)
|
||||
{
|
||||
struct block_private *bp = bio->bi_private;
|
||||
struct super_block *sb = bp->sb;
|
||||
|
||||
TRACE_BLOCK(end_io, bp);
|
||||
block_end_io(sb, bio->bi_rw, bp, err);
|
||||
block_end_io(sb, kc_bio_get_opf(bio), bp, kc_bio_get_errno(bio));
|
||||
bio_put(bio);
|
||||
}
|
||||
|
||||
@@ -477,7 +443,7 @@ static void block_bio_end_io(struct bio *bio, int err)
|
||||
* Kick off IO for a single block.
|
||||
*/
|
||||
static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
int rw)
|
||||
blk_opf_t opf)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct bio *bio = NULL;
|
||||
@@ -488,7 +454,7 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
int ret = 0;
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
return -ENOLINK;
|
||||
|
||||
sector = bp->bl.blkno << (SCOUTFS_BLOCK_LG_SHIFT - 9);
|
||||
|
||||
@@ -504,14 +470,13 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
|
||||
for (off = 0; off < SCOUTFS_BLOCK_LG_SIZE; off += PAGE_SIZE) {
|
||||
if (!bio) {
|
||||
bio = bio_alloc(GFP_NOFS, SCOUTFS_BLOCK_LG_PAGES_PER);
|
||||
bio = kc_bio_alloc(sbi->meta_bdev, SCOUTFS_BLOCK_LG_PAGES_PER, opf, GFP_NOFS);
|
||||
if (!bio) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
|
||||
bio->bi_sector = sector + (off >> 9);
|
||||
bio->bi_bdev = sbi->meta_bdev;
|
||||
kc_bio_set_sector(bio, sector + (off >> 9));
|
||||
bio->bi_end_io = block_bio_end_io;
|
||||
bio->bi_private = bp;
|
||||
|
||||
@@ -528,22 +493,26 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
BUG();
|
||||
|
||||
if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
|
||||
submit_bio(rw, bio);
|
||||
kc_submit_bio(bio);
|
||||
bio = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (bio)
|
||||
submit_bio(rw, bio);
|
||||
kc_submit_bio(bio);
|
||||
|
||||
blk_finish_plug(&plug);
|
||||
|
||||
/* let racing end_io know we're done */
|
||||
block_end_io(sb, rw, bp, ret);
|
||||
block_end_io(sb, opf, bp, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return a block with an elevated refcount if it was present in the
|
||||
* hash table and its refcount didn't indicate that it was being freed.
|
||||
*/
|
||||
static struct block_private *block_lookup(struct super_block *sb, u64 blkno)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
@@ -551,8 +520,8 @@ static struct block_private *block_lookup(struct super_block *sb, u64 blkno)
|
||||
|
||||
rcu_read_lock();
|
||||
bp = rhashtable_lookup(&binf->ht, &blkno, block_ht_params);
|
||||
if (bp)
|
||||
bp = block_get_if_inserted(bp);
|
||||
if (bp && !block_get_if_live(bp))
|
||||
bp = NULL;
|
||||
rcu_read_unlock();
|
||||
|
||||
return bp;
|
||||
@@ -640,7 +609,7 @@ static struct block_private *block_read(struct super_block *sb, u64 blkno)
|
||||
|
||||
if (!test_bit(BLOCK_BIT_UPTODATE, &bp->bits) &&
|
||||
test_and_clear_bit(BLOCK_BIT_NEW, &bp->bits)) {
|
||||
ret = block_submit_bio(sb, bp, READ);
|
||||
ret = block_submit_bio(sb, bp, REQ_OP_READ);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
@@ -677,10 +646,11 @@ out:
|
||||
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct block_private *bp = NULL;
|
||||
bool retried = false;
|
||||
__le32 crc = 0;
|
||||
int ret;
|
||||
|
||||
retry:
|
||||
@@ -693,7 +663,9 @@ retry:
|
||||
|
||||
/* corrupted writes might be a sign of a stale reference */
|
||||
if (!test_bit(BLOCK_BIT_CRC_VALID, &bp->bits)) {
|
||||
if (hdr->crc != block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE)) {
|
||||
crc = block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE);
|
||||
if (hdr->crc != crc) {
|
||||
trace_scoutfs_block_stale(sb, ref, hdr, magic, le32_to_cpu(crc));
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
@@ -701,16 +673,17 @@ retry:
|
||||
set_bit(BLOCK_BIT_CRC_VALID, &bp->bits);
|
||||
}
|
||||
|
||||
if (hdr->magic != cpu_to_le32(magic) || hdr->fsid != super->hdr.fsid ||
|
||||
if (hdr->magic != cpu_to_le32(magic) || hdr->fsid != cpu_to_le64(sbi->fsid) ||
|
||||
hdr->seq != ref->seq || hdr->blkno != ref->blkno) {
|
||||
trace_scoutfs_block_stale(sb, ref, hdr, magic, 0);
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if ((ret == -ESTALE || scoutfs_trigger(sb, BLOCK_REMOVE_STALE)) &&
|
||||
!retried && !block_is_dirty(bp)) {
|
||||
if (!retried && !IS_ERR_OR_NULL(bp) && !block_is_dirty(bp) &&
|
||||
(ret == -ESTALE || scoutfs_trigger(sb, BLOCK_REMOVE_STALE))) {
|
||||
retried = true;
|
||||
scoutfs_inc_counter(sb, block_cache_remove_stale);
|
||||
block_remove(sb, bp);
|
||||
@@ -728,6 +701,36 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool stale_refs_match(struct scoutfs_block_ref *caller, struct scoutfs_block_ref *saved)
|
||||
{
|
||||
return !caller || (caller->blkno == saved->blkno && caller->seq == saved->seq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if a read of a reference that gave ESTALE should be retried or
|
||||
* should generate a hard error. If this is the second time we got
|
||||
* ESTALE from the same refs then we return EIO and the caller should
|
||||
* stop. As long as we keep seeing different refs we'll return ESTALE
|
||||
* and the caller can keep trying.
|
||||
*/
|
||||
int scoutfs_block_check_stale(struct super_block *sb, int ret,
|
||||
struct scoutfs_block_saved_refs *saved,
|
||||
struct scoutfs_block_ref *a, struct scoutfs_block_ref *b)
|
||||
{
|
||||
if (ret == -ESTALE) {
|
||||
if (stale_refs_match(a, &saved->refs[0]) && stale_refs_match(b, &saved->refs[1])){
|
||||
ret = -EIO;
|
||||
} else {
|
||||
if (a)
|
||||
saved->refs[0] = *a;
|
||||
if (b)
|
||||
saved->refs[1] = *b;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
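The retry policy above is the one the reworked scoutfs_alloc_foreach() loop in alloc.c relies on; a condensed, hedged sketch of the same pattern, where read_refs_and_work() is a hypothetical stand-in for the caller's read path:

```c
	DECLARE_SAVED_REFS(saved);
	struct scoutfs_block_ref ref;
	int ret;

	do {
		ret = read_refs_and_work(sb, &ref);	/* hypothetical caller work */
		ret = scoutfs_block_check_stale(sb, ret, &saved, &ref, NULL);
	} while (ret == -ESTALE);
	/* a second -ESTALE from the same ref comes back as -EIO and ends the loop */
```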
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl)
|
||||
{
|
||||
if (!IS_ERR_OR_NULL(bl))
|
||||
@@ -797,7 +800,7 @@ int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
u32 magic, struct scoutfs_block **bl_ret,
|
||||
u64 dirty_blkno, u64 *ref_blkno)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_block *cow_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct block_private *exist_bp = NULL;
|
||||
@@ -865,7 +868,7 @@ int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
|
||||
hdr = bl->data;
|
||||
hdr->magic = cpu_to_le32(magic);
|
||||
hdr->fsid = super->hdr.fsid;
|
||||
hdr->fsid = cpu_to_le64(sbi->fsid);
|
||||
hdr->blkno = cpu_to_le64(bl->blkno);
|
||||
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
|
||||
|
||||
@@ -939,7 +942,7 @@ int scoutfs_block_writer_write(struct super_block *sb,
|
||||
/* retry previous write errors */
|
||||
clear_bit(BLOCK_BIT_ERROR, &bp->bits);
|
||||
|
||||
ret = block_submit_bio(sb, bp, WRITE);
|
||||
ret = block_submit_bio(sb, bp, REQ_OP_WRITE);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -1039,96 +1042,91 @@ u64 scoutfs_block_writer_dirty_bytes(struct super_block *sb,
|
||||
return wri->nr_dirty_blocks * SCOUTFS_BLOCK_LG_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove a number of cached blocks that haven't been used recently.
|
||||
*
|
||||
* We don't maintain a strictly ordered LRU to avoid the contention of
|
||||
* accesses always moving blocks around in some precise global
|
||||
* structure.
|
||||
*
|
||||
* Instead we use counters to divide the blocks into two roughly equal
|
||||
* groups by how recently they were accessed. We randomly walk all
|
||||
* inserted blocks looking for any blocks in the older half to remove
|
||||
* and free. The random walk and there being two groups means that we
|
||||
* typically only walk a small multiple of the number we're looking for
|
||||
* before we find them all.
|
||||
*
|
||||
* Our rcu walk of blocks can see blocks in all stages of their life
|
||||
* cycle, from dirty blocks to those with 0 references that are queued
|
||||
* for freeing. We only want to free idle inserted blocks so we
|
||||
* atomically remove blocks when the only references are ours and the
|
||||
* hash table.
|
||||
*/
|
||||
static int block_shrink(struct shrinker *shrink, struct shrink_control *sc)
|
||||
static unsigned long block_count_objects(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
struct block_info *binf = container_of(shrink, struct block_info,
|
||||
shrinker);
|
||||
struct block_info *binf = KC_SHRINKER_CONTAINER_OF(shrink, struct block_info);
|
||||
struct super_block *sb = binf->sb;
|
||||
struct rhashtable_iter iter;
|
||||
struct block_private *bp;
|
||||
unsigned long nr;
|
||||
u64 recently;
|
||||
|
||||
nr = sc->nr_to_scan;
|
||||
if (nr == 0)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, block_cache_count_objects);
|
||||
return list_lru_shrink_count(&binf->lru, sc);
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_shrink);
|
||||
struct isolate_args {
|
||||
struct super_block *sb;
|
||||
struct list_head dispose;
|
||||
};
|
||||
|
||||
nr = DIV_ROUND_UP(nr, SCOUTFS_BLOCK_LG_PAGES_PER);
|
||||
|
||||
restart:
|
||||
recently = accessed_recently(binf);
|
||||
rhashtable_walk_enter(&binf->ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
|
||||
/*
|
||||
* This isn't great but I don't see a better way. We want to
|
||||
* walk the hash from a random point so that we're not
|
||||
* constantly walking over the same region that we've already
|
||||
* freed old blocks within. The interface doesn't let us do
|
||||
* this explicitly, but this seems to work? The difference this
|
||||
* makes is enormous, around a few orders of magnitude fewer
|
||||
* _nexts per shrink.
|
||||
*/
|
||||
if (iter.walker.tbl)
|
||||
iter.slot = prandom_u32_max(iter.walker.tbl->size);
|
||||
|
||||
while (nr > 0) {
|
||||
bp = rhashtable_walk_next(&iter);
|
||||
if (bp == NULL)
|
||||
break;
|
||||
if (bp == ERR_PTR(-EAGAIN)) {
|
||||
/* hard exit to wait for rcu rebalance to finish */
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_restart);
|
||||
synchronize_rcu();
|
||||
goto restart;
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_next);
|
||||
|
||||
if (bp->accessed >= recently) {
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_recent);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (block_get_if_inserted(bp)) {
|
||||
if (block_remove_solo(sb, bp)) {
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_remove);
|
||||
TRACE_BLOCK(shrink, bp);
|
||||
nr--;
|
||||
}
|
||||
block_put(sb, bp);
|
||||
}
|
||||
#define DECLARE_ISOLATE_ARGS(sb_, name_) \
|
||||
struct isolate_args name_ = { \
|
||||
.sb = sb_, \
|
||||
.dispose = LIST_HEAD_INIT(name_.dispose), \
|
||||
}
|
||||
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
out:
|
||||
return min_t(u64, (u64)atomic_read(&binf->total_inserted) * SCOUTFS_BLOCK_LG_PAGES_PER,
|
||||
INT_MAX);
|
||||
static enum lru_status isolate_lru_block(struct list_head *item, struct list_lru_one *list,
|
||||
void *cb_arg)
|
||||
{
|
||||
struct block_private *bp = container_of(item, struct block_private, lru_head);
|
||||
struct isolate_args *ia = cb_arg;
|
||||
|
||||
TRACE_BLOCK(isolate, bp);
|
||||
|
||||
/* rotate accessed blocks to the tail of the list (lazy promotion) */
|
||||
if (test_and_clear_bit(BLOCK_BIT_ACCESSED, &bp->bits)) {
|
||||
scoutfs_inc_counter(ia->sb, block_cache_isolate_rotate);
|
||||
return LRU_ROTATE;
|
||||
}
|
||||
|
||||
/* any refs, including dirty/io, stop us from acquiring lru refcount */
|
||||
if (!block_get_remove_live_only(bp)) {
|
||||
scoutfs_inc_counter(ia->sb, block_cache_isolate_skip);
|
||||
return LRU_SKIP;
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(ia->sb, block_cache_isolate_removed);
|
||||
list_lru_isolate_move(list, &bp->lru_head, &ia->dispose);
|
||||
return LRU_REMOVED;
|
||||
}
|
||||
|
||||
static void shrink_dispose_blocks(struct super_block *sb, struct list_head *dispose)
|
||||
{
|
||||
struct block_private *bp;
|
||||
struct block_private *bp__;
|
||||
|
||||
list_for_each_entry_safe(bp, bp__, dispose, lru_head) {
|
||||
list_del_init(&bp->lru_head);
|
||||
block_remove(sb, bp);
|
||||
block_put(sb, bp);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long block_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
struct block_info *binf = KC_SHRINKER_CONTAINER_OF(shrink, struct block_info);
|
||||
struct super_block *sb = binf->sb;
|
||||
DECLARE_ISOLATE_ARGS(sb, ia);
|
||||
unsigned long freed;
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_scan_objects);
|
||||
|
||||
freed = kc_list_lru_shrink_walk(&binf->lru, sc, isolate_lru_block, &ia);
|
||||
shrink_dispose_blocks(sb, &ia.dispose);
|
||||
return freed;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called during shutdown with no other users. The isolating walk must
|
||||
* find blocks on the lru that only have references for presence on the
|
||||
* lru and in the hash table.
|
||||
*/
|
||||
static void block_shrink_all(struct super_block *sb)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
DECLARE_ISOLATE_ARGS(sb, ia);
|
||||
|
||||
do {
|
||||
kc_list_lru_walk(&binf->lru, isolate_lru_block, &ia, 128);
|
||||
shrink_dispose_blocks(sb, &ia.dispose);
|
||||
} while (list_lru_count(&binf->lru) > 0);
|
||||
}
|
||||
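
A minimal sketch, not scoutfs code, of the count_objects/scan_objects split built around a list_lru that the hunks above introduce. The example_* names are hypothetical; kc_list_lru_shrink_walk is the scoutfs compat wrapper used in the patch, since the isolate callback signature and shrinker registration details differ across kernel versions (which the KC_* macros hide).

#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

struct example_cache {
	struct list_lru lru;
	struct shrinker shrinker;
};

struct example_entry {
	struct list_head lru_head;
	/* object payload would live here */
};

/* move an idle entry onto the caller's private dispose list */
static enum lru_status example_isolate(struct list_head *item, struct list_lru_one *list,
				       void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

/* count_objects: cheap estimate of reclaimable entries, no walking */
static unsigned long example_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct example_cache *cache = container_of(shrink, struct example_cache, shrinker);

	return list_lru_shrink_count(&cache->lru, sc);
}

/* scan_objects: isolate under the lru locks, then free outside of them */
static unsigned long example_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct example_cache *cache = container_of(shrink, struct example_cache, shrinker);
	struct example_entry *ent, *tmp;
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = kc_list_lru_shrink_walk(&cache->lru, sc, example_isolate, &dispose);

	list_for_each_entry_safe(ent, tmp, &dispose, lru_head)
		kfree(ent);

	return freed;
}
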
|
||||
struct sm_block_completion {
|
||||
@@ -1136,11 +1134,11 @@ struct sm_block_completion {
|
||||
int err;
|
||||
};
|
||||
|
||||
static void sm_block_bio_end_io(struct bio *bio, int err)
|
||||
static void KC_DECLARE_BIO_END_IO(sm_block_bio_end_io, struct bio *bio)
|
||||
{
|
||||
struct sm_block_completion *sbc = bio->bi_private;
|
||||
|
||||
sbc->err = err;
|
||||
sbc->err = kc_bio_get_errno(bio);
|
||||
complete(&sbc->comp);
|
||||
bio_put(bio);
|
||||
}
|
||||
@@ -1155,9 +1153,8 @@ static void sm_block_bio_end_io(struct bio *bio, int err)
 * only layer that sees the full block buffer so we pass the calculated
 * crc to the caller for them to check in their context.
 */
|
||||
static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw, u64 blkno,
|
||||
struct scoutfs_block_header *hdr, size_t len,
|
||||
__le32 *blk_crc)
|
||||
static int sm_block_io(struct super_block *sb, struct block_device *bdev, blk_opf_t opf,
|
||||
u64 blkno, struct scoutfs_block_header *hdr, size_t len, __le32 *blk_crc)
|
||||
{
|
||||
struct scoutfs_block_header *pg_hdr;
|
||||
struct sm_block_completion sbc;
|
||||
@@ -1168,10 +1165,10 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw
|
||||
BUILD_BUG_ON(PAGE_SIZE < SCOUTFS_BLOCK_SM_SIZE);
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
return -ENOLINK;
|
||||
|
||||
if (WARN_ON_ONCE(len > SCOUTFS_BLOCK_SM_SIZE) ||
|
||||
WARN_ON_ONCE(!(rw & WRITE) && !blk_crc))
|
||||
WARN_ON_ONCE(!op_is_write(opf) && !blk_crc))
|
||||
return -EINVAL;
|
||||
|
||||
page = alloc_page(GFP_NOFS);
|
||||
@@ -1180,7 +1177,7 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw
|
||||
|
||||
pg_hdr = page_address(page);
|
||||
|
||||
if (rw & WRITE) {
|
||||
if (op_is_write(opf)) {
|
||||
memcpy(pg_hdr, hdr, len);
|
||||
if (len < SCOUTFS_BLOCK_SM_SIZE)
|
||||
memset((char *)pg_hdr + len, 0,
|
||||
@@ -1188,14 +1185,13 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw
|
||||
pg_hdr->crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
|
||||
}
|
||||
|
||||
bio = bio_alloc(GFP_NOFS, 1);
|
||||
bio = kc_bio_alloc(bdev, 1, opf, GFP_NOFS);
|
||||
if (!bio) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bio->bi_sector = blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9);
|
||||
bio->bi_bdev = bdev;
|
||||
kc_bio_set_sector(bio, blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9));
|
||||
bio->bi_end_io = sm_block_bio_end_io;
|
||||
bio->bi_private = &sbc;
|
||||
bio_add_page(bio, page, SCOUTFS_BLOCK_SM_SIZE, 0);
|
||||
@@ -1203,12 +1199,12 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw
|
||||
init_completion(&sbc.comp);
|
||||
sbc.err = 0;
|
||||
|
||||
submit_bio((rw & WRITE) ? WRITE_SYNC : READ_SYNC, bio);
|
||||
kc_submit_bio(bio);
|
||||
|
||||
wait_for_completion(&sbc.comp);
|
||||
ret = sbc.err;
|
||||
|
||||
if (ret == 0 && !(rw & WRITE)) {
|
||||
if (ret == 0 && !op_is_write(opf)) {
|
||||
memcpy(hdr, pg_hdr, len);
|
||||
*blk_crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
|
||||
}
|
||||
@@ -1222,20 +1218,20 @@ int scoutfs_block_read_sm(struct super_block *sb,
|
||||
struct scoutfs_block_header *hdr, size_t len,
|
||||
__le32 *blk_crc)
|
||||
{
|
||||
return sm_block_io(sb, bdev, READ, blkno, hdr, len, blk_crc);
|
||||
return sm_block_io(sb, bdev, REQ_OP_READ, blkno, hdr, len, blk_crc);
|
||||
}
|
||||
|
||||
int scoutfs_block_write_sm(struct super_block *sb,
|
||||
struct block_device *bdev, u64 blkno,
|
||||
struct scoutfs_block_header *hdr, size_t len)
|
||||
{
|
||||
return sm_block_io(sb, bdev, WRITE, blkno, hdr, len, NULL);
|
||||
return sm_block_io(sb, bdev, REQ_OP_WRITE, blkno, hdr, len, NULL);
|
||||
}
|
||||
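
A small aside on the rw/opf conversion in the hunks above: newer kernels pass a combined blk_opf_t whose low bits hold the REQ_OP_* operation and whose high bits hold REQ_* flags, so the old "rw & WRITE" tests become op_is_write(). A tiny illustration, assuming a kernel recent enough to define blk_opf_t:

#include <linux/blk_types.h>

static bool example_is_write(blk_opf_t opf)
{
	/* tests the operation bits, ignoring flag bits such as REQ_SYNC */
	return op_is_write(opf);
}

/* example_is_write(REQ_OP_WRITE | REQ_SYNC) -> true
 * example_is_write(REQ_OP_READ)             -> false */
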
|
||||
int scoutfs_block_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct block_info *binf;
|
||||
struct block_info *binf = NULL;
|
||||
int ret;
|
||||
|
||||
binf = kzalloc(sizeof(struct block_info), GFP_KERNEL);
|
||||
@@ -1244,19 +1240,19 @@ int scoutfs_block_setup(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = rhashtable_init(&binf->ht, &block_ht_params);
|
||||
if (ret < 0) {
|
||||
kfree(binf);
|
||||
ret = list_lru_init(&binf->lru);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = rhashtable_init(&binf->ht, &block_ht_params);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
binf->sb = sb;
|
||||
atomic_set(&binf->total_inserted, 0);
|
||||
atomic64_set(&binf->access_counter, 0);
|
||||
init_waitqueue_head(&binf->waitq);
|
||||
binf->shrinker.shrink = block_shrink;
|
||||
binf->shrinker.seeks = DEFAULT_SEEKS;
|
||||
register_shrinker(&binf->shrinker);
|
||||
KC_INIT_SHRINKER_FUNCS(&binf->shrinker, block_count_objects,
|
||||
block_scan_objects);
|
||||
KC_REGISTER_SHRINKER(&binf->shrinker, "scoutfs-block:" SCSBF, SCSB_ARGS(sb));
|
||||
INIT_WORK(&binf->free_work, block_free_work);
|
||||
init_llist_head(&binf->free_llist);
|
||||
|
||||
@@ -1264,8 +1260,10 @@ int scoutfs_block_setup(struct super_block *sb)
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret)
|
||||
scoutfs_block_destroy(sb);
|
||||
if (ret < 0 && binf) {
|
||||
list_lru_destroy(&binf->lru);
|
||||
kfree(binf);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1276,10 +1274,11 @@ void scoutfs_block_destroy(struct super_block *sb)
|
||||
struct block_info *binf = SCOUTFS_SB(sb)->block_info;
|
||||
|
||||
if (binf) {
|
||||
unregister_shrinker(&binf->shrinker);
|
||||
block_remove_all(sb);
|
||||
KC_UNREGISTER_SHRINKER(&binf->shrinker);
|
||||
block_shrink_all(sb);
|
||||
flush_work(&binf->free_work);
|
||||
rhashtable_destroy(&binf->ht);
|
||||
list_lru_destroy(&binf->lru);
|
||||
|
||||
kfree(binf);
|
||||
sbi->block_info = NULL;
|
||||
|
||||
@@ -13,6 +13,17 @@ struct scoutfs_block {
|
||||
void *priv;
|
||||
};
|
||||
|
||||
struct scoutfs_block_saved_refs {
|
||||
struct scoutfs_block_ref refs[2];
|
||||
};
|
||||
|
||||
#define DECLARE_SAVED_REFS(name) \
|
||||
struct scoutfs_block_saved_refs name = {{{0,}}}
|
||||
|
||||
int scoutfs_block_check_stale(struct super_block *sb, int ret,
|
||||
struct scoutfs_block_saved_refs *saved,
|
||||
struct scoutfs_block_ref *a, struct scoutfs_block_ref *b);
|
||||
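
A sketch of how a reader might use the saved-refs helper declared above, following the shape of the forest.c retry loop later in this diff. example_read_blocks() is a hypothetical reader that can return -ESTALE, and the "-EIO once the same refs fail twice" behaviour is inferred from the open-coded check this helper replaces.

static int example_read_with_retry(struct super_block *sb,
				   struct scoutfs_block_ref *a,
				   struct scoutfs_block_ref *b)
{
	DECLARE_SAVED_REFS(saved);
	int ret;

retry:
	ret = example_read_blocks(sb, a, b);
	if (ret == -ESTALE) {
		/* presumably: keep retrying while the refs change, give up
		 * with -EIO once the same refs go stale twice in a row */
		ret = scoutfs_block_check_stale(sb, ret, &saved, a, b);
		if (ret == -ESTALE)
			goto retry;
	}

	return ret;
}
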
|
||||
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
|
||||
struct scoutfs_block **bl_ret);
|
||||
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
|
||||
|
||||
kmod/src/btree.c (454 changed lines)
@@ -1233,10 +1233,6 @@ static int btree_walk(struct super_block *sb,
|
||||
WARN_ON_ONCE((flags & (BTW_GET_PAR|BTW_SET_PAR)) && !par_root))
|
||||
return -EINVAL;
|
||||
|
||||
/* all ops come through walk and walk calls all reads */
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
|
||||
scoutfs_inc_counter(sb, btree_walk);
|
||||
|
||||
restart:
|
||||
@@ -2033,187 +2029,253 @@ int scoutfs_btree_rebalance(struct super_block *sb,
|
||||
key, SCOUTFS_BTREE_MAX_VAL_LEN, NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
struct merge_pos {
|
||||
struct merged_range {
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
struct rb_root root;
|
||||
int size;
|
||||
};
|
||||
|
||||
struct merged_item {
|
||||
struct rb_node node;
|
||||
struct scoutfs_btree_root *root;
|
||||
struct scoutfs_block *bl;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct scoutfs_avl_node *avl;
|
||||
struct scoutfs_key *key;
|
||||
struct scoutfs_key key;
|
||||
u64 seq;
|
||||
u8 flags;
|
||||
unsigned int val_len;
|
||||
u8 *val;
|
||||
u8 val[0];
|
||||
};
|
||||
|
||||
static struct merge_pos *first_mpos(struct rb_root *root)
|
||||
static inline struct merged_item *mitem_container(struct rb_node *node)
|
||||
{
|
||||
struct rb_node *node = rb_first(root);
|
||||
if (node)
|
||||
return container_of(node, struct merge_pos, node);
|
||||
return node ? container_of(node, struct merged_item, node) : NULL;
|
||||
}
|
||||
|
||||
static inline struct merged_item *first_mitem(struct rb_root *root)
|
||||
{
|
||||
return mitem_container(rb_first(root));
|
||||
}
|
||||
|
||||
static inline struct merged_item *last_mitem(struct rb_root *root)
|
||||
{
|
||||
return mitem_container(rb_last(root));
|
||||
}
|
||||
|
||||
static inline struct merged_item *next_mitem(struct merged_item *mitem)
|
||||
{
|
||||
return mitem_container(mitem ? rb_next(&mitem->node) : NULL);
|
||||
}
|
||||
|
||||
static inline struct merged_item *prev_mitem(struct merged_item *mitem)
|
||||
{
|
||||
return mitem_container(mitem ? rb_prev(&mitem->node) : NULL);
|
||||
}
|
||||
|
||||
static struct merged_item *find_mitem(struct rb_root *root, struct scoutfs_key *key,
|
||||
struct rb_node **parent_ret, struct rb_node ***link_ret)
|
||||
{
|
||||
struct rb_node **node = &root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct merged_item *mitem;
|
||||
int cmp;
|
||||
|
||||
while (*node) {
|
||||
parent = *node;
|
||||
mitem = container_of(*node, struct merged_item, node);
|
||||
|
||||
cmp = scoutfs_key_compare(key, &mitem->key);
|
||||
|
||||
if (cmp < 0) {
|
||||
node = &(*node)->rb_left;
|
||||
} else if (cmp > 0) {
|
||||
node = &(*node)->rb_right;
|
||||
} else {
|
||||
*parent_ret = NULL;
|
||||
*link_ret = NULL;
|
||||
return mitem;
|
||||
}
|
||||
}
|
||||
|
||||
*parent_ret = parent;
|
||||
*link_ret = node;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct merge_pos *next_mpos(struct merge_pos *mpos)
|
||||
static void insert_mitem(struct merged_range *rng, struct merged_item *mitem,
|
||||
struct rb_node *parent, struct rb_node **link)
|
||||
{
|
||||
struct rb_node *node;
|
||||
|
||||
if (mpos && (node = rb_next(&mpos->node)))
|
||||
return container_of(node, struct merge_pos, node);
|
||||
else
|
||||
return NULL;
|
||||
rb_link_node(&mitem->node, parent, link);
|
||||
rb_insert_color(&mitem->node, &rng->root);
|
||||
rng->size += item_len_bytes(mitem->val_len);
|
||||
}
|
||||
|
||||
static void free_mpos(struct super_block *sb, struct merge_pos *mpos)
|
||||
static void replace_mitem(struct merged_range *rng, struct merged_item *victim,
|
||||
struct merged_item *new)
|
||||
{
|
||||
scoutfs_block_put(sb, mpos->bl);
|
||||
kfree(mpos);
|
||||
rb_replace_node(&victim->node, &new->node, &rng->root);
|
||||
RB_CLEAR_NODE(&victim->node);
|
||||
rng->size -= item_len_bytes(victim->val_len);
|
||||
rng->size += item_len_bytes(new->val_len);
|
||||
}
|
||||
|
||||
static void insert_mpos(struct rb_root *pos_root, struct merge_pos *ins)
|
||||
static void free_mitem(struct merged_range *rng, struct merged_item *mitem)
|
||||
{
|
||||
struct rb_node **node = &pos_root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct merge_pos *mpos;
|
||||
int cmp;
|
||||
if (IS_ERR_OR_NULL(mitem))
|
||||
return;
|
||||
|
||||
parent = NULL;
|
||||
while (*node) {
|
||||
parent = *node;
|
||||
mpos = container_of(*node, struct merge_pos, node);
|
||||
|
||||
/* sort merge items by key then newest to oldest */
|
||||
cmp = scoutfs_key_compare(ins->key, mpos->key) ?:
|
||||
-scoutfs_cmp(ins->seq, mpos->seq);
|
||||
|
||||
if (cmp < 0)
|
||||
node = &(*node)->rb_left;
|
||||
else
|
||||
node = &(*node)->rb_right;
|
||||
if (!RB_EMPTY_NODE(&mitem->node)) {
|
||||
rng->size -= item_len_bytes(mitem->val_len);
|
||||
rb_erase(&mitem->node, &rng->root);
|
||||
}
|
||||
|
||||
rb_link_node(&ins->node, parent, node);
|
||||
rb_insert_color(&ins->node, pos_root);
|
||||
kfree(mitem);
|
||||
}
|
||||
|
||||
static void trim_range_size(struct merged_range *rng, int merge_window)
|
||||
{
|
||||
struct merged_item *mitem;
|
||||
struct merged_item *tmp;
|
||||
|
||||
mitem = last_mitem(&rng->root);
|
||||
while (mitem && rng->size > merge_window) {
|
||||
|
||||
rng->end = mitem->key;
|
||||
scoutfs_key_dec(&rng->end);
|
||||
|
||||
tmp = mitem;
|
||||
mitem = prev_mitem(mitem);
|
||||
free_mitem(rng, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
static void trim_range_end(struct merged_range *rng)
|
||||
{
|
||||
struct merged_item *mitem;
|
||||
struct merged_item *tmp;
|
||||
|
||||
mitem = last_mitem(&rng->root);
|
||||
while (mitem && scoutfs_key_compare(&mitem->key, &rng->end) > 0) {
|
||||
tmp = mitem;
|
||||
mitem = prev_mitem(mitem);
|
||||
free_mitem(rng, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Find the next item in the merge_pos root in the caller's range and
 * insert it into the rbtree sorted by key and version so that merging
 * can find the next newest item at the front of the rbtree. We free
 * the mpos on error or if there are no more items in the range.
 * Record and combine logged items from log roots for merging with the
 * writable destination root. The caller is responsible for trimming
 * the range if it gets too large or if the key range shrinks.
 */
|
||||
static int reset_mpos(struct super_block *sb, struct rb_root *pos_root, struct merge_pos *mpos,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end)
|
||||
static int merge_read_item(struct super_block *sb, struct scoutfs_key *key, u64 seq, u8 flags,
|
||||
void *val, int val_len, void *arg)
|
||||
{
|
||||
struct scoutfs_btree_item *item;
|
||||
struct scoutfs_avl_node *next;
|
||||
struct btree_walk_key_range kr;
|
||||
struct scoutfs_key walk_key;
|
||||
int ret = 0;
|
||||
struct merged_range *rng = arg;
|
||||
struct merged_item *mitem;
|
||||
struct merged_item *found;
|
||||
struct rb_node *parent;
|
||||
struct rb_node **link;
|
||||
int ret;
|
||||
|
||||
/* always erase before freeing or inserting */
|
||||
if (!RB_EMPTY_NODE(&mpos->node)) {
|
||||
rb_erase(&mpos->node, pos_root);
|
||||
RB_CLEAR_NODE(&mpos->node);
|
||||
}
|
||||
|
||||
/*
|
||||
* advance to next item via the avl tree. The caller's pos is
|
||||
* only ever incremented past the last key so we can use next to
|
||||
* iterate rather than using search to skip past multiple items.
|
||||
*/
|
||||
if (mpos->avl)
|
||||
mpos->avl = scoutfs_avl_next(&mpos->bt->item_root, mpos->avl);
|
||||
|
||||
/* find the next leaf with the key if we run out of items */
|
||||
walk_key = *start;
|
||||
while (!mpos->avl && !scoutfs_key_is_zeros(&walk_key)) {
|
||||
scoutfs_block_put(sb, mpos->bl);
|
||||
mpos->bl = NULL;
|
||||
ret = btree_walk(sb, NULL, NULL, mpos->root, BTW_NEXT, &walk_key,
|
||||
0, &mpos->bl, &kr, NULL);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
free_mpos(sb, mpos);
|
||||
found = find_mitem(&rng->root, key, &parent, &link);
|
||||
if (found) {
|
||||
ret = scoutfs_forest_combine_deltas(key, found->val, found->val_len, val, val_len);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
if (ret > 0) {
|
||||
if (ret == SCOUTFS_DELTA_COMBINED) {
|
||||
scoutfs_inc_counter(sb, btree_merge_delta_combined);
|
||||
} else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
|
||||
scoutfs_inc_counter(sb, btree_merge_delta_null);
|
||||
free_mitem(rng, found);
|
||||
}
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
mpos->bt = mpos->bl->data;
|
||||
|
||||
mpos->avl = scoutfs_avl_search(&mpos->bt->item_root, cmp_key_item,
|
||||
start, NULL, NULL, &next, NULL) ?: next;
|
||||
if (mpos->avl == NULL)
|
||||
walk_key = kr.iter_next;
|
||||
if (found->seq >= seq) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* see if we're out of items within the range */
|
||||
item = node_item(mpos->avl);
|
||||
if (!item || scoutfs_key_compare(item_key(item), end) > 0) {
|
||||
free_mpos(sb, mpos);
|
||||
ret = 0;
|
||||
mitem = kmalloc(offsetof(struct merged_item, val[val_len]), GFP_NOFS);
|
||||
if (!mitem) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* insert the next item within range at its version */
|
||||
mpos->key = item_key(item);
|
||||
mpos->seq = le64_to_cpu(item->seq);
|
||||
mpos->flags = item->flags;
|
||||
mpos->val_len = item_val_len(item);
|
||||
mpos->val = item_val(mpos->bt, item);
|
||||
mitem->key = *key;
|
||||
mitem->seq = seq;
|
||||
mitem->flags = flags;
|
||||
mitem->val_len = val_len;
|
||||
if (val_len)
|
||||
memcpy(mitem->val, val, val_len);
|
||||
|
||||
if (found) {
|
||||
replace_mitem(rng, found, mitem);
|
||||
free_mitem(rng, found);
|
||||
} else {
|
||||
insert_mitem(rng, mitem, parent, link);
|
||||
}
|
||||
|
||||
insert_mpos(pos_root, mpos);
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * The caller has reset all the merge positions for all the input log
 * btree roots and wants the next logged item it should try and merge
 * with the items in the fs_root.
 * Read a range of merged items. The caller has set the key bounds of
 * the range. We read a merge window's worth of items from blocks in
 * each input btree.
 *
 * We look ahead in the logged item stream to see if we should merge any
 * older logged delta items into one result for the caller. We also
 * take this opportunity to skip and reset the mpos for any older
 * versions of the first item.
 * The caller can only use the smallest range that overlaps with all the
 * blocks that we read. We start reading from the range's start key so
 * it will always be present and we don't need to adjust it. The final
 * block we read from each input might not cover the range's end so it
 * needs to be adjusted.
 *
 * The range's end can also shrink if we have to drop items because they
 * exceeded the merge window size.
 */
|
||||
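
The "smallest range that overlaps with all the blocks we read" rule above amounts to taking the minimum covered end across the inputs. A toy illustration with plain integer keys standing in for struct scoutfs_key:

#include <linux/minmax.h>
#include <linux/types.h>

static u64 example_usable_end(const u64 *input_covered_end, int nr_inputs, u64 range_end)
{
	u64 end = range_end;
	int i;

	/* the merged window is only trustworthy up to the smallest end
	 * that every input's final block actually covered */
	for (i = 0; i < nr_inputs; i++)
		end = min(end, input_covered_end[i]);

	return end;
}
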
static int next_resolved_mpos(struct super_block *sb, struct rb_root *pos_root,
|
||||
struct scoutfs_key *end, struct merge_pos **mpos_ret)
|
||||
static int read_merged_range(struct super_block *sb, struct merged_range *rng,
|
||||
struct list_head *inputs, int merge_window)
|
||||
{
|
||||
struct merge_pos *mpos;
|
||||
struct merge_pos *next;
|
||||
struct scoutfs_btree_root_head *rhead;
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
struct scoutfs_key key;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
while ((mpos = first_mpos(pos_root)) && (next = next_mpos(mpos)) &&
|
||||
!scoutfs_key_compare(mpos->key, next->key)) {
|
||||
list_for_each_entry(rhead, inputs, head) {
|
||||
key = rng->start;
|
||||
|
||||
ret = scoutfs_forest_combine_deltas(mpos->key, mpos->val, mpos->val_len,
|
||||
next->val, next->val_len);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
/* reset advances to the next item */
|
||||
key = *mpos->key;
|
||||
scoutfs_key_inc(&key);
|
||||
|
||||
/* always skip next combined or older version */
|
||||
ret = reset_mpos(sb, pos_root, next, &key, end);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (ret == SCOUTFS_DELTA_COMBINED) {
|
||||
scoutfs_inc_counter(sb, btree_merge_delta_combined);
|
||||
} else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
|
||||
scoutfs_inc_counter(sb, btree_merge_delta_null);
|
||||
/* if merging resulted in no info, skip current */
|
||||
ret = reset_mpos(sb, pos_root, mpos, &key, end);
|
||||
for (i = 0; i < merge_window; i += SCOUTFS_BLOCK_LG_SIZE) {
|
||||
start = key;
|
||||
end = rng->end;
|
||||
ret = scoutfs_btree_read_items(sb, &rhead->root, &key, &start, &end,
|
||||
merge_read_item, rng);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_key_compare(&end, &rng->end) >= 0)
|
||||
break;
|
||||
|
||||
key = end;
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
if (scoutfs_key_compare(&end, &rng->end) < 0) {
|
||||
rng->end = end;
|
||||
trim_range_end(rng);
|
||||
}
|
||||
|
||||
if (rng->size > merge_window)
|
||||
trim_range_size(rng, merge_window);
|
||||
}
|
||||
|
||||
*mpos_ret = mpos;
|
||||
trace_scoutfs_btree_merge_read_range(sb, &rng->start, &rng->end, rng->size);
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -2230,6 +2292,13 @@ static int next_resolved_mpos(struct super_block *sb, struct rb_root *pos_root,
 * to allocators running low or needing to join/split the parent.
 * *next_ret is set to the next key which hasn't been merged so that the
 * caller can retry with a new allocator and subtree.
 *
 * The number of input roots can be immense. The merge_window specifies
 * the size of the set of merged items that we'll maintain as we iterate
 * over all the input roots. Once we've merged items into the window
 * from all the input roots the merged input items are then merged to
 * the writable destination root. It may take multiple passes of
 * windows of merged items to cover the input key range.
 */
|
||||
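
As a rough illustration of the multi-pass behaviour described above, with purely hypothetical sizes and ignoring that the real loop re-reads the window as it drains, the number of passes is bounded by the total merged item bytes over the window size:

#include <linux/kernel.h>
#include <linux/types.h>

static inline u64 example_merge_passes(u64 total_merged_item_bytes, u64 merge_window)
{
	/* e.g. roughly 50 MB of merged items with an 8 MB window
	 * needs about 7 window passes */
	return DIV_ROUND_UP(total_merged_item_bytes, merge_window);
}
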
int scoutfs_btree_merge(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
@@ -2239,18 +2308,16 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
struct scoutfs_key *next_ret,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct list_head *inputs,
|
||||
bool subtree, int dirty_limit, int alloc_low)
|
||||
bool subtree, int dirty_limit, int alloc_low, int merge_window)
|
||||
{
|
||||
struct scoutfs_btree_root_head *rhead;
|
||||
struct rb_root pos_root = RB_ROOT;
|
||||
struct scoutfs_btree_item *item;
|
||||
struct scoutfs_btree_block *bt;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct btree_walk_key_range kr;
|
||||
struct scoutfs_avl_node *par;
|
||||
struct scoutfs_key next;
|
||||
struct merge_pos *mpos;
|
||||
struct merge_pos *tmp;
|
||||
struct merged_item *mitem;
|
||||
struct merged_item *tmp;
|
||||
struct merged_range rng;
|
||||
int walk_val_len;
|
||||
int walk_flags;
|
||||
bool is_del;
|
||||
@@ -2261,49 +2328,59 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
trace_scoutfs_btree_merge(sb, root, start, end);
|
||||
scoutfs_inc_counter(sb, btree_merge);
|
||||
|
||||
list_for_each_entry(rhead, inputs, head) {
|
||||
mpos = kzalloc(sizeof(*mpos), GFP_NOFS);
|
||||
if (!mpos) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
RB_CLEAR_NODE(&mpos->node);
|
||||
mpos->root = &rhead->root;
|
||||
|
||||
ret = reset_mpos(sb, &pos_root, mpos, start, end);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
walk_flags = BTW_DIRTY;
|
||||
if (subtree)
|
||||
walk_flags |= BTW_SUBTREE;
|
||||
walk_val_len = 0;
|
||||
|
||||
while ((ret = next_resolved_mpos(sb, &pos_root, end, &mpos)) == 0 && mpos) {
|
||||
rng.start = *start;
|
||||
rng.end = *end;
|
||||
rng.root = RB_ROOT;
|
||||
rng.size = 0;
|
||||
|
||||
ret = read_merged_range(sb, &rng, inputs, merge_window);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
for (;;) {
|
||||
/* read next window as it empties (and it is possible to read an empty range) */
|
||||
mitem = first_mitem(&rng.root);
|
||||
if (!mitem) {
|
||||
/* done if the read range hit the end */
|
||||
if (scoutfs_key_compare(&rng.end, end) >= 0)
|
||||
break;
|
||||
|
||||
/* read next batch of merged items */
|
||||
rng.start = rng.end;
|
||||
scoutfs_key_inc(&rng.start);
|
||||
rng.end = *end;
|
||||
ret = read_merged_range(sb, &rng, inputs, merge_window);
|
||||
if (ret < 0)
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (scoutfs_block_writer_dirty_bytes(sb, wri) >= dirty_limit) {
|
||||
scoutfs_inc_counter(sb, btree_merge_dirty_limit);
|
||||
ret = -ERANGE;
|
||||
*next_ret = *mpos->key;
|
||||
*next_ret = mitem->key;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (scoutfs_alloc_meta_low(sb, alloc, alloc_low)) {
|
||||
scoutfs_inc_counter(sb, btree_merge_alloc_low);
|
||||
ret = -ERANGE;
|
||||
*next_ret = *mpos->key;
|
||||
*next_ret = mitem->key;
|
||||
goto out;
|
||||
}
|
||||
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
ret = btree_walk(sb, alloc, wri, root, walk_flags,
|
||||
mpos->key, walk_val_len, &bl, &kr, NULL);
|
||||
&mitem->key, walk_val_len, &bl, &kr, NULL);
|
||||
if (ret < 0) {
|
||||
if (ret == -ERANGE)
|
||||
*next_ret = *mpos->key;
|
||||
*next_ret = mitem->key;
|
||||
goto out;
|
||||
}
|
||||
bt = bl->data;
|
||||
@@ -2315,22 +2392,21 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
continue;
|
||||
}
|
||||
|
||||
while ((ret = next_resolved_mpos(sb, &pos_root, end, &mpos)) == 0 && mpos) {
|
||||
|
||||
while (mitem) {
|
||||
/* walk to new leaf if we exceed parent ref key */
|
||||
if (scoutfs_key_compare(mpos->key, &kr.end) > 0)
|
||||
if (scoutfs_key_compare(&mitem->key, &kr.end) > 0)
|
||||
break;
|
||||
|
||||
/* see if there's an existing item */
|
||||
item = leaf_item_hash_search(sb, bt, mpos->key);
|
||||
is_del = !!(mpos->flags & SCOUTFS_ITEM_FLAG_DELETION);
|
||||
item = leaf_item_hash_search(sb, bt, &mitem->key);
|
||||
is_del = !!(mitem->flags & SCOUTFS_ITEM_FLAG_DELETION);
|
||||
|
||||
/* see if we're merging delta items */
|
||||
if (item && !is_del)
|
||||
delta = scoutfs_forest_combine_deltas(mpos->key,
|
||||
delta = scoutfs_forest_combine_deltas(&mitem->key,
|
||||
item_val(bt, item),
|
||||
item_val_len(item),
|
||||
mpos->val, mpos->val_len);
|
||||
mitem->val, mitem->val_len);
|
||||
else
|
||||
delta = 0;
|
||||
if (delta < 0) {
|
||||
@@ -2342,40 +2418,38 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
scoutfs_inc_counter(sb, btree_merge_delta_null);
|
||||
}
|
||||
|
||||
trace_scoutfs_btree_merge_items(sb, mpos->root,
|
||||
mpos->key, mpos->val_len,
|
||||
trace_scoutfs_btree_merge_items(sb, &mitem->key, mitem->val_len,
|
||||
item ? root : NULL,
|
||||
item ? item_key(item) : NULL,
|
||||
item ? item_val_len(item) : 0, is_del);
|
||||
|
||||
/* rewalk and split if ins/update needs room */
|
||||
if (!is_del && !delta && !mid_free_item_room(bt, mpos->val_len)) {
|
||||
if (!is_del && !delta && !mid_free_item_room(bt, mitem->val_len)) {
|
||||
walk_flags |= BTW_INSERT;
|
||||
walk_val_len = mpos->val_len;
|
||||
walk_val_len = mitem->val_len;
|
||||
break;
|
||||
}
|
||||
|
||||
/* insert missing non-deletion merge items */
|
||||
if (!item && !is_del) {
|
||||
scoutfs_avl_search(&bt->item_root,
|
||||
cmp_key_item, mpos->key,
|
||||
scoutfs_avl_search(&bt->item_root, cmp_key_item, &mitem->key,
|
||||
&cmp, &par, NULL, NULL);
|
||||
create_item(bt, mpos->key, mpos->seq, mpos->flags,
|
||||
mpos->val, mpos->val_len, par, cmp);
|
||||
create_item(bt, &mitem->key, mitem->seq, mitem->flags,
|
||||
mitem->val, mitem->val_len, par, cmp);
|
||||
scoutfs_inc_counter(sb, btree_merge_insert);
|
||||
}
|
||||
|
||||
/* update existing items */
|
||||
if (item && !is_del && !delta) {
|
||||
item->seq = cpu_to_le64(mpos->seq);
|
||||
item->flags = mpos->flags;
|
||||
update_item_value(bt, item, mpos->val, mpos->val_len);
|
||||
item->seq = cpu_to_le64(mitem->seq);
|
||||
item->flags = mitem->flags;
|
||||
update_item_value(bt, item, mitem->val, mitem->val_len);
|
||||
scoutfs_inc_counter(sb, btree_merge_update);
|
||||
}
|
||||
|
||||
/* update combined delta item seq */
|
||||
if (delta == SCOUTFS_DELTA_COMBINED) {
|
||||
item->seq = cpu_to_le64(mpos->seq);
|
||||
item->seq = cpu_to_le64(mitem->seq);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2407,21 +2481,18 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
walk_flags &= ~(BTW_INSERT | BTW_DELETE);
|
||||
walk_val_len = 0;
|
||||
|
||||
/* finished with this key, skip any older items */
|
||||
next = *mpos->key;
|
||||
scoutfs_key_inc(&next);
|
||||
ret = reset_mpos(sb, &pos_root, mpos, &next, end);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
/* finished with this merged item */
|
||||
tmp = mitem;
|
||||
mitem = next_mitem(mitem);
|
||||
free_mitem(&rng, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
scoutfs_block_put(sb, bl);
|
||||
rbtree_postorder_for_each_entry_safe(mpos, tmp, &pos_root, node) {
|
||||
free_mpos(sb, mpos);
|
||||
}
|
||||
rbtree_postorder_for_each_entry_safe(mitem, tmp, &rng.root, node)
|
||||
free_mitem(&rng, mitem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -2453,7 +2524,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_btree_root *root, int alloc_low)
|
||||
struct scoutfs_btree_root *root, int free_budget)
|
||||
{
|
||||
u64 blknos[SCOUTFS_BTREE_MAX_HEIGHT];
|
||||
struct scoutfs_block *bl = NULL;
|
||||
@@ -2463,11 +2534,15 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
struct scoutfs_avl_node *node;
|
||||
struct scoutfs_avl_node *next;
|
||||
struct scoutfs_key par_next;
|
||||
int nr_freed = 0;
|
||||
int nr_par;
|
||||
int level;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (WARN_ON_ONCE(free_budget <= 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON_ONCE(root->height > ARRAY_SIZE(blknos)))
|
||||
return -EIO; /* XXX corruption */
|
||||
|
||||
@@ -2542,8 +2617,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
while (node) {
|
||||
|
||||
/* make sure we can always free parents after leaves */
|
||||
if (scoutfs_alloc_meta_low(sb, alloc,
|
||||
alloc_low + nr_par + 1)) {
|
||||
if ((nr_freed + 1 + nr_par) > free_budget) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
@@ -2557,6 +2631,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
le64_to_cpu(ref.blkno));
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
nr_freed++;
|
||||
|
||||
node = scoutfs_avl_next(&bt->item_root, node);
|
||||
if (node) {
|
||||
@@ -2572,6 +2647,7 @@ int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
blknos[i]);
|
||||
ret = scoutfs_free_meta(sb, alloc, wri, blknos[i]);
|
||||
BUG_ON(ret); /* checked meta low, freed should fit */
|
||||
nr_freed++;
|
||||
}
|
||||
|
||||
/* restart walk past the subtree we just freed */
|
||||
|
||||
@@ -119,13 +119,13 @@ int scoutfs_btree_merge(struct super_block *sb,
|
||||
struct scoutfs_key *next_ret,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct list_head *input_list,
|
||||
bool subtree, int dirty_limit, int alloc_low);
|
||||
bool subtree, int dirty_limit, int alloc_low, int merge_window);
|
||||
|
||||
int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_btree_root *root, int alloc_low);
|
||||
struct scoutfs_btree_root *root, int free_budget);
|
||||
|
||||
void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <net/sock.h>
|
||||
#include <net/tcp.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/overflow.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
@@ -68,6 +69,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
struct scoutfs_net_inode_alloc ial;
|
||||
__le64 lecount = cpu_to_le64(count);
|
||||
u64 tmp;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_net_sync_request(sb, client->conn,
|
||||
@@ -80,7 +82,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
|
||||
|
||||
if (*nr == 0)
|
||||
ret = -ENOSPC;
|
||||
else if (*ino + *nr < *ino)
|
||||
else if (check_add_overflow(*ino, *nr - 1, &tmp))
|
||||
ret = -EINVAL;
|
||||
}
|
||||
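
The hunk above swaps the open-coded wrap test for check_add_overflow() from <linux/overflow.h>, which stores the sum and returns true if the addition wrapped. A small sketch of the same check, with hypothetical caller names:

#include <linux/overflow.h>
#include <linux/types.h>

static bool example_ino_range_ok(u64 first_ino, u64 nr)
{
	u64 last;

	if (nr == 0)
		return false;

	/* last inode in the allocated range is first_ino + nr - 1;
	 * check_add_overflow() returns true if that addition wrapped */
	return !check_add_overflow(first_ino, nr - 1, &last);
}
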
|
||||
@@ -356,7 +358,6 @@ static int client_greeting(struct super_block *sb,
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct client_info *client = sbi->client_info;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_net_greeting *gr = resp;
|
||||
bool new_server;
|
||||
int ret;
|
||||
@@ -371,9 +372,9 @@ static int client_greeting(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gr->fsid != super->hdr.fsid) {
|
||||
if (gr->fsid != cpu_to_le64(sbi->fsid)) {
|
||||
scoutfs_warn(sb, "server greeting response fsid 0x%llx did not match client fsid 0x%llx",
|
||||
le64_to_cpu(gr->fsid), le64_to_cpu(super->hdr.fsid));
|
||||
le64_to_cpu(gr->fsid), sbi->fsid);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -476,13 +477,15 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
connect_dwork.work);
|
||||
struct super_block *sb = client->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
const bool am_quorum = opts->quorum_slot_nr >= 0;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct scoutfs_net_greeting greet;
|
||||
struct sockaddr_in sin;
|
||||
bool am_quorum;
|
||||
int ret;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
am_quorum = opts.quorum_slot_nr >= 0;
|
||||
|
||||
/* can unmount once server farewell handling removes our item */
|
||||
if (client->sending_farewell &&
|
||||
lookup_mounted_client_item(sb, sbi->rid) == 0) {
|
||||
@@ -505,7 +508,7 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
goto out;
|
||||
|
||||
/* send a greeting to verify endpoints of each connection */
|
||||
greet.fsid = super->hdr.fsid;
|
||||
greet.fsid = cpu_to_le64(sbi->fsid);
|
||||
greet.fmt_vers = cpu_to_le64(sbi->fmt_vers);
|
||||
greet.server_term = cpu_to_le64(client->server_term);
|
||||
greet.rid = cpu_to_le64(sbi->rid);
|
||||
@@ -668,3 +671,11 @@ void scoutfs_client_destroy(struct super_block *sb)
|
||||
kfree(client);
|
||||
sbi->client_info = NULL;
|
||||
}
|
||||
|
||||
void scoutfs_client_net_shutdown(struct super_block *sb)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
if (client && client->conn)
|
||||
scoutfs_net_shutdown(sb, client->conn);
|
||||
}
|
||||
|
||||
@@ -35,6 +35,7 @@ int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_op
|
||||
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd);
|
||||
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst);
|
||||
|
||||
void scoutfs_client_net_shutdown(struct super_block *sb);
|
||||
int scoutfs_client_setup(struct super_block *sb);
|
||||
void scoutfs_client_destroy(struct super_block *sb);
|
||||
|
||||
|
||||
@@ -26,15 +26,15 @@
|
||||
EXPAND_COUNTER(block_cache_alloc_page_order) \
|
||||
EXPAND_COUNTER(block_cache_alloc_virt) \
|
||||
EXPAND_COUNTER(block_cache_end_io_error) \
|
||||
EXPAND_COUNTER(block_cache_isolate_removed) \
|
||||
EXPAND_COUNTER(block_cache_isolate_rotate) \
|
||||
EXPAND_COUNTER(block_cache_isolate_skip) \
|
||||
EXPAND_COUNTER(block_cache_forget) \
|
||||
EXPAND_COUNTER(block_cache_free) \
|
||||
EXPAND_COUNTER(block_cache_free_work) \
|
||||
EXPAND_COUNTER(block_cache_remove_stale) \
|
||||
EXPAND_COUNTER(block_cache_shrink) \
|
||||
EXPAND_COUNTER(block_cache_shrink_next) \
|
||||
EXPAND_COUNTER(block_cache_shrink_recent) \
|
||||
EXPAND_COUNTER(block_cache_shrink_remove) \
|
||||
EXPAND_COUNTER(block_cache_shrink_restart) \
|
||||
EXPAND_COUNTER(block_cache_count_objects) \
|
||||
EXPAND_COUNTER(block_cache_scan_objects) \
|
||||
EXPAND_COUNTER(btree_compact_values) \
|
||||
EXPAND_COUNTER(btree_compact_values_enomem) \
|
||||
EXPAND_COUNTER(btree_delete) \
|
||||
@@ -75,8 +75,6 @@
|
||||
EXPAND_COUNTER(data_write_begin_enobufs_retry) \
|
||||
EXPAND_COUNTER(dentry_revalidate_error) \
|
||||
EXPAND_COUNTER(dentry_revalidate_invalid) \
|
||||
EXPAND_COUNTER(dentry_revalidate_locked) \
|
||||
EXPAND_COUNTER(dentry_revalidate_orphan) \
|
||||
EXPAND_COUNTER(dentry_revalidate_rcu) \
|
||||
EXPAND_COUNTER(dentry_revalidate_root) \
|
||||
EXPAND_COUNTER(dentry_revalidate_valid) \
|
||||
@@ -90,6 +88,9 @@
|
||||
EXPAND_COUNTER(forest_read_items) \
|
||||
EXPAND_COUNTER(forest_roots_next_hint) \
|
||||
EXPAND_COUNTER(forest_set_bloom_bits) \
|
||||
EXPAND_COUNTER(inode_deleted) \
|
||||
EXPAND_COUNTER(item_cache_count_objects) \
|
||||
EXPAND_COUNTER(item_cache_scan_objects) \
|
||||
EXPAND_COUNTER(item_clear_dirty) \
|
||||
EXPAND_COUNTER(item_create) \
|
||||
EXPAND_COUNTER(item_delete) \
|
||||
@@ -115,14 +116,16 @@
|
||||
EXPAND_COUNTER(item_pcpu_page_hit) \
|
||||
EXPAND_COUNTER(item_pcpu_page_miss) \
|
||||
EXPAND_COUNTER(item_pcpu_page_miss_keys) \
|
||||
EXPAND_COUNTER(item_read_pages_barrier) \
|
||||
EXPAND_COUNTER(item_read_pages_retry) \
|
||||
EXPAND_COUNTER(item_read_pages_split) \
|
||||
EXPAND_COUNTER(item_shrink_page) \
|
||||
EXPAND_COUNTER(item_shrink_page_dirty) \
|
||||
EXPAND_COUNTER(item_shrink_page_reader) \
|
||||
EXPAND_COUNTER(item_shrink_page_trylock) \
|
||||
EXPAND_COUNTER(item_update) \
|
||||
EXPAND_COUNTER(item_write_dirty) \
|
||||
EXPAND_COUNTER(lock_alloc) \
|
||||
EXPAND_COUNTER(lock_count_objects) \
|
||||
EXPAND_COUNTER(lock_free) \
|
||||
EXPAND_COUNTER(lock_grant_request) \
|
||||
EXPAND_COUNTER(lock_grant_response) \
|
||||
@@ -136,11 +139,14 @@
|
||||
EXPAND_COUNTER(lock_lock_error) \
|
||||
EXPAND_COUNTER(lock_nonblock_eagain) \
|
||||
EXPAND_COUNTER(lock_recover_request) \
|
||||
EXPAND_COUNTER(lock_scan_objects) \
|
||||
EXPAND_COUNTER(lock_shrink_attempted) \
|
||||
EXPAND_COUNTER(lock_shrink_aborted) \
|
||||
EXPAND_COUNTER(lock_shrink_work) \
|
||||
EXPAND_COUNTER(lock_unlock) \
|
||||
EXPAND_COUNTER(lock_wait) \
|
||||
EXPAND_COUNTER(log_merge_no_finalized) \
|
||||
EXPAND_COUNTER(log_merge_wait_timeout) \
|
||||
EXPAND_COUNTER(net_dropped_response) \
|
||||
EXPAND_COUNTER(net_send_bytes) \
|
||||
EXPAND_COUNTER(net_send_error) \
|
||||
@@ -152,11 +158,14 @@
|
||||
EXPAND_COUNTER(net_recv_messages) \
|
||||
EXPAND_COUNTER(net_unknown_request) \
|
||||
EXPAND_COUNTER(orphan_scan) \
|
||||
EXPAND_COUNTER(orphan_scan_attempts) \
|
||||
EXPAND_COUNTER(orphan_scan_cached) \
|
||||
EXPAND_COUNTER(orphan_scan_error) \
|
||||
EXPAND_COUNTER(orphan_scan_item) \
|
||||
EXPAND_COUNTER(orphan_scan_omap_set) \
|
||||
EXPAND_COUNTER(orphan_scan_read) \
|
||||
EXPAND_COUNTER(quota_info_count_objects) \
|
||||
EXPAND_COUNTER(quota_info_scan_objects) \
|
||||
EXPAND_COUNTER(quorum_candidate_server_stopping) \
|
||||
EXPAND_COUNTER(quorum_elected) \
|
||||
EXPAND_COUNTER(quorum_fence_error) \
|
||||
EXPAND_COUNTER(quorum_fence_leader) \
|
||||
@@ -167,11 +176,13 @@
|
||||
EXPAND_COUNTER(quorum_recv_resignation) \
|
||||
EXPAND_COUNTER(quorum_recv_vote) \
|
||||
EXPAND_COUNTER(quorum_send_heartbeat) \
|
||||
EXPAND_COUNTER(quorum_send_heartbeat_dropped) \
|
||||
EXPAND_COUNTER(quorum_send_resignation) \
|
||||
EXPAND_COUNTER(quorum_send_request) \
|
||||
EXPAND_COUNTER(quorum_send_vote) \
|
||||
EXPAND_COUNTER(quorum_server_shutdown) \
|
||||
EXPAND_COUNTER(quorum_term_follower) \
|
||||
EXPAND_COUNTER(reclaimed_open_logs) \
|
||||
EXPAND_COUNTER(server_commit_hold) \
|
||||
EXPAND_COUNTER(server_commit_queue) \
|
||||
EXPAND_COUNTER(server_commit_worker) \
|
||||
@@ -188,26 +199,23 @@
|
||||
EXPAND_COUNTER(srch_search_retry_empty) \
|
||||
EXPAND_COUNTER(srch_search_sorted) \
|
||||
EXPAND_COUNTER(srch_search_sorted_block) \
|
||||
EXPAND_COUNTER(srch_search_stale_eio) \
|
||||
EXPAND_COUNTER(srch_search_stale_retry) \
|
||||
EXPAND_COUNTER(srch_search_xattrs) \
|
||||
EXPAND_COUNTER(srch_read_stale) \
|
||||
EXPAND_COUNTER(statfs) \
|
||||
EXPAND_COUNTER(totl_read_copied) \
|
||||
EXPAND_COUNTER(totl_read_finalized) \
|
||||
EXPAND_COUNTER(totl_read_fs) \
|
||||
EXPAND_COUNTER(totl_read_item) \
|
||||
EXPAND_COUNTER(totl_read_logged) \
|
||||
EXPAND_COUNTER(trans_commit_data_alloc_low) \
|
||||
EXPAND_COUNTER(trans_commit_dirty_meta_full) \
|
||||
EXPAND_COUNTER(trans_commit_fsync) \
|
||||
EXPAND_COUNTER(trans_commit_meta_alloc_low) \
|
||||
EXPAND_COUNTER(trans_commit_sync_fs) \
|
||||
EXPAND_COUNTER(trans_commit_timer) \
|
||||
EXPAND_COUNTER(trans_commit_written)
|
||||
EXPAND_COUNTER(trans_commit_written) \
|
||||
EXPAND_COUNTER(wkic_count_objects) \
|
||||
EXPAND_COUNTER(wkic_scan_objects)
|
||||
|
||||
#define FIRST_COUNTER alloc_alloc_data
|
||||
#define LAST_COUNTER trans_commit_written
|
||||
#define LAST_COUNTER wkic_scan_objects
|
||||
|
||||
#undef EXPAND_COUNTER
|
||||
#define EXPAND_COUNTER(which) struct percpu_counter which;
|
||||
@@ -234,12 +242,12 @@ struct scoutfs_counters {
|
||||
#define SCOUTFS_PCPU_COUNTER_BATCH (1 << 30)
|
||||
|
||||
#define scoutfs_inc_counter(sb, which) \
|
||||
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, 1, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, 1, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
|
||||
#define scoutfs_add_counter(sb, which, cnt) \
|
||||
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, cnt, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, cnt, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
|
||||
void __init scoutfs_init_counters(void);
|
||||
int scoutfs_setup_counters(struct super_block *sb);
|
||||
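
The counter list above is an x-macro: EXPAND_COUNTER is redefined before each expansion so that one list generates the per-cpu counter struct fields, the printable names, and so on. A minimal sketch of the pattern with a hypothetical two-entry list:

#include <linux/percpu_counter.h>

#define EXAMPLE_COUNTERS \
	EXPAND_COUNTER(reads) \
	EXPAND_COUNTER(writes)

/* expand once as per-cpu counter fields */
#define EXPAND_COUNTER(which) struct percpu_counter which;
struct example_counters {
	EXAMPLE_COUNTERS
};
#undef EXPAND_COUNTER

/* expand again as printable names for sysfs/debugfs output */
#define EXPAND_COUNTER(which) #which,
static const char * const example_counter_names[] = {
	EXAMPLE_COUNTERS
};
#undef EXPAND_COUNTER
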
|
||||
kmod/src/data.c (700 changed lines; diff suppressed because it is too large)
@@ -43,6 +43,9 @@ extern const struct file_operations scoutfs_file_fops;
|
||||
struct scoutfs_alloc;
|
||||
struct scoutfs_block_writer;
|
||||
|
||||
int scoutfs_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh,
|
||||
int create);
|
||||
|
||||
int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
|
||||
u64 ino, u64 iblock, u64 last, bool offline,
|
||||
struct scoutfs_lock *lock);
|
||||
|
||||
kmod/src/dir.c (887 changed lines; diff suppressed because it is too large)
@@ -5,14 +5,22 @@
|
||||
#include "lock.h"
|
||||
|
||||
extern const struct file_operations scoutfs_dir_fops;
|
||||
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
extern const struct inode_operations_wrapper scoutfs_dir_iops;
|
||||
#else
|
||||
extern const struct inode_operations scoutfs_dir_iops;
|
||||
#endif
|
||||
extern const struct inode_operations scoutfs_symlink_iops;
|
||||
|
||||
extern const struct dentry_operations scoutfs_dentry_ops;
|
||||
|
||||
struct scoutfs_link_backref_entry {
|
||||
struct list_head head;
|
||||
u64 dir_ino;
|
||||
u64 dir_pos;
|
||||
u16 name_len;
|
||||
u8 d_type;
|
||||
bool last;
|
||||
struct scoutfs_dirent dent;
|
||||
/* the full name is allocated and stored in dent.name[] */
|
||||
};
|
||||
@@ -22,14 +30,10 @@ int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
|
||||
void scoutfs_dir_free_backref_path(struct super_block *sb,
|
||||
struct list_head *list);
|
||||
|
||||
int scoutfs_dir_add_next_linkref(struct super_block *sb, u64 ino,
|
||||
u64 dir_ino, u64 dir_pos,
|
||||
struct list_head *list);
|
||||
int scoutfs_dir_add_next_linkrefs(struct super_block *sb, u64 ino, u64 dir_ino, u64 dir_pos,
|
||||
int count, struct list_head *list);
|
||||
|
||||
int scoutfs_symlink_drop(struct super_block *sb, u64 ino,
|
||||
struct scoutfs_lock *lock, u64 i_size);
|
||||
|
||||
int scoutfs_dir_init(void);
|
||||
void scoutfs_dir_exit(void);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -81,7 +81,7 @@ static struct dentry *scoutfs_fh_to_dentry(struct super_block *sb,
|
||||
trace_scoutfs_fh_to_dentry(sb, fh_type, sfid);
|
||||
|
||||
if (scoutfs_valid_fileid(fh_type))
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino), 0);
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino), 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -100,7 +100,7 @@ static struct dentry *scoutfs_fh_to_parent(struct super_block *sb,
|
||||
|
||||
if (scoutfs_valid_fileid(fh_type) &&
|
||||
fh_type == FILEID_SCOUTFS_WITH_PARENT)
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino), 0);
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino), 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -114,8 +114,8 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
|
||||
int ret;
|
||||
u64 ino;
|
||||
|
||||
ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), 0, 0, &list);
|
||||
if (ret)
|
||||
ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), 0, 0, 1, &list);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ent = list_first_entry(&list, struct scoutfs_link_backref_entry, head);
|
||||
@@ -123,7 +123,7 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
|
||||
scoutfs_dir_free_backref_path(sb, &list);
|
||||
trace_scoutfs_get_parent(sb, inode, ino);
|
||||
|
||||
inode = scoutfs_iget(sb, ino, 0);
|
||||
inode = scoutfs_iget(sb, ino, 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -138,9 +138,9 @@ static int scoutfs_get_name(struct dentry *parent, char *name,
|
||||
LIST_HEAD(list);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), dir_ino,
|
||||
0, &list);
|
||||
if (ret)
|
||||
ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), dir_ino,
|
||||
0, 1, &list);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = -ENOENT;
|
||||
|
||||
@@ -105,12 +105,12 @@ static ssize_t elapsed_secs_show(struct kobject *kobj,
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
ktime_t now = ktime_get();
|
||||
struct timeval tv = { 0, };
|
||||
ktime_t t = ns_to_ktime(0);
|
||||
|
||||
if (ktime_after(now, fence->start_kt))
|
||||
tv = ktime_to_timeval(ktime_sub(now, fence->start_kt));
|
||||
t = ktime_sub(now, fence->start_kt);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", (long long)tv.tv_sec);
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", (long long)ktime_divns(t, NSEC_PER_SEC));
|
||||
}
|
||||
SCOUTFS_ATTR_RO(elapsed_secs);
|
||||
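
The elapsed_secs change above drops the removed timeval conversion and divides the ktime delta directly; ktime values are signed nanoseconds, so whole seconds fall out of ktime_divns(). A condensed sketch of the same calculation:

#include <linux/ktime.h>
#include <linux/types.h>

static u64 example_elapsed_secs(ktime_t start)
{
	ktime_t now = ktime_get();
	ktime_t delta = ktime_after(now, start) ? ktime_sub(now, start) : ns_to_ktime(0);

	return ktime_divns(delta, NSEC_PER_SEC);
}
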
|
||||
@@ -395,12 +395,13 @@ int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies)
|
||||
int scoutfs_fence_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct fence_info *fi;
|
||||
int ret;
|
||||
|
||||
/* can only fence if we can be elected by quorum */
|
||||
if (opts->quorum_slot_nr == -1) {
|
||||
scoutfs_options_read(sb, &opts);
|
||||
if (opts.quorum_slot_nr == -1) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
kmod/src/file.c (156 changed lines)
@@ -28,7 +28,9 @@
|
||||
#include "inode.h"
|
||||
#include "per_task.h"
|
||||
#include "omap.h"
|
||||
#include "quota.h"
|
||||
|
||||
#ifdef KC_LINUX_HAVE_FOP_AIO_READ
|
||||
/*
|
||||
* Start a high level file read. We check for offline extents in the
|
||||
* read region here so that we only check the extents once. We use the
|
||||
@@ -42,27 +44,27 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
|
||||
retry:
|
||||
/* protect checked extents from release */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
inode_unlock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
ret = scoutfs_data_wait_check_iov(inode, iov, nr_segs, pos,
|
||||
SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_READ,
|
||||
&dw, inode_lock);
|
||||
&dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
} else {
|
||||
@@ -74,7 +76,7 @@ retry:
|
||||
out:
|
||||
inode_dio_done(inode);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
@@ -92,7 +94,7 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
@@ -101,34 +103,42 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
return 0;
|
||||
|
||||
retry:
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, inode_lock);
|
||||
ret = scoutfs_inode_check_retention(inode);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
/* data_version is per inode, whole file must be online */
|
||||
ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode),
|
||||
SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_WRITE,
|
||||
&dw, inode_lock);
|
||||
&dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_quota_check_data(sb, inode);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* XXX: remove SUID bit */
|
||||
|
||||
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
|
||||
|
||||
out:
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
@@ -146,8 +156,119 @@ out:
|
||||
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
ssize_t scoutfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
|
||||
int scoutfs_permission(struct inode *inode, int mask)
|
||||
retry:
|
||||
/* protect checked extents from release */
|
||||
inode_lock(inode);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
inode_unlock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
ret = scoutfs_data_wait_check(inode, iocb->ki_pos, iov_iter_count(to), SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_READ, &dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
} else {
|
||||
WARN_ON_ONCE(true);
|
||||
}
|
||||
|
||||
ret = generic_file_read_iter(iocb, to);
|
||||
|
||||
out:
|
||||
inode_dio_end(inode);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
return ret;
|
||||
}

ssize_t scoutfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *scoutfs_inode_lock = NULL;
	SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
	DECLARE_DATA_WAIT(dw);
	ssize_t ret;

retry:
	inode_lock(inode);
	ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
				 SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
	if (ret)
		goto out;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = scoutfs_inode_check_retention(inode);
	if (ret < 0)
		goto out;

	ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
	if (ret)
		goto out;

	ret = scoutfs_quota_check_data(sb, inode);
	if (ret)
		goto out;

	if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
		/* data_version is per inode, whole file must be online */
		ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode), SEF_OFFLINE,
					      SCOUTFS_IOC_DWO_WRITE, &dw, scoutfs_inode_lock);
		if (ret != 0)
			goto out;
	}

	/* XXX: remove SUID bit */

	ret = __generic_file_write_iter(iocb, from);

out:
	scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
	scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
	inode_unlock(inode);

	if (scoutfs_data_wait_found(&dw)) {
		ret = scoutfs_data_wait(inode, &dw);
		if (ret == 0)
			goto retry;
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
#endif

int scoutfs_permission(KC_VFS_NS_DEF
		       struct inode *inode, int mask)
{
	struct super_block *sb = inode->i_sb;
	struct scoutfs_lock *inode_lock = NULL;
@@ -161,7 +282,8 @@ int scoutfs_permission(struct inode *inode, int mask)
	if (ret)
		return ret;

	ret = generic_permission(inode, mask);
	ret = generic_permission(KC_VFS_INIT_NS
				 inode, mask);

	scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);

@@ -1,11 +1,17 @@
#ifndef _SCOUTFS_FILE_H_
#define _SCOUTFS_FILE_H_

#ifdef KC_LINUX_HAVE_FOP_AIO_READ
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos);
ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos);
int scoutfs_permission(struct inode *inode, int mask);
#else
ssize_t scoutfs_file_read_iter(struct kiocb *, struct iov_iter *);
ssize_t scoutfs_file_write_iter(struct kiocb *, struct iov_iter *);
#endif
int scoutfs_permission(KC_VFS_NS_DEF
		       struct inode *inode, int mask);
loff_t scoutfs_file_llseek(struct file *file, loff_t offset, int whence);

#endif /* _SCOUTFS_FILE_H_ */

@@ -78,11 +78,6 @@ struct forest_refs {
|
||||
struct scoutfs_block_ref logs_ref;
|
||||
};
|
||||
|
||||
/* initialize some refs that initially aren't equal */
|
||||
#define DECLARE_STALE_TRACKING_SUPER_REFS(a, b) \
|
||||
struct forest_refs a = {{cpu_to_le64(0),}}; \
|
||||
struct forest_refs b = {{cpu_to_le64(1),}}
|
||||
|
||||
struct forest_bloom_nrs {
|
||||
unsigned int nrs[SCOUTFS_FOREST_BLOOM_NRS];
|
||||
};
|
||||
@@ -136,11 +131,11 @@ static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scout
|
||||
int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *next)
|
||||
{
|
||||
DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_btree_root item_root;
|
||||
struct scoutfs_log_trees *lt;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
DECLARE_SAVED_REFS(saved);
|
||||
struct scoutfs_key found;
|
||||
struct scoutfs_key ltk;
|
||||
bool checked_fs;
|
||||
@@ -155,8 +150,6 @@ retry:
|
||||
goto out;
|
||||
|
||||
trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
|
||||
refs.fs_ref = roots.fs_root.ref;
|
||||
refs.logs_ref = roots.logs_root.ref;
|
||||
|
||||
scoutfs_key_init_log_trees(<k, 0, 0);
|
||||
checked_fs = false;
|
||||
@@ -212,14 +205,10 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
|
||||
return -EIO;
|
||||
prev_refs = refs;
|
||||
ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.fs_root.ref, &roots.logs_root.ref);
|
||||
if (ret == -ESTALE)
|
||||
goto retry;
|
||||
}
|
||||
out:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -249,19 +238,16 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u6
|
||||
* We return -ESTALE if we hit stale blocks to give the caller a chance
|
||||
* to reset their state and retry with a newer version of the btrees.
|
||||
*/
|
||||
int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
|
||||
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
{
|
||||
struct forest_read_items_data rid = {
|
||||
.cb = cb,
|
||||
.cb_arg = arg,
|
||||
};
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_bloom_block *bb;
|
||||
struct forest_bloom_nrs bloom;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
@@ -275,18 +261,14 @@ int scoutfs_forest_read_items(struct super_block *sb,
|
||||
scoutfs_inc_counter(sb, forest_read_items);
|
||||
calc_bloom_nrs(&bloom, bloom_key);
|
||||
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
|
||||
trace_scoutfs_forest_using_roots(sb, &roots->fs_root, &roots->logs_root);
|
||||
|
||||
*start = orig_start;
|
||||
*end = orig_end;
|
||||
|
||||
/* start with fs root items */
|
||||
rid.fic |= FIC_FS_ROOT;
|
||||
ret = scoutfs_btree_read_items(sb, &roots.fs_root, key, start, end,
|
||||
ret = scoutfs_btree_read_items(sb, &roots->fs_root, key, start, end,
|
||||
forest_read_items, &rid);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -294,7 +276,7 @@ int scoutfs_forest_read_items(struct super_block *sb,
|
||||
|
||||
scoutfs_key_init_log_trees(<k, 0, 0);
|
||||
for (;; scoutfs_key_inc(<k)) {
|
||||
ret = scoutfs_btree_next(sb, &roots.logs_root, <k, &iref);
|
||||
ret = scoutfs_btree_next(sb, &roots->logs_root, <k, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.val_len == sizeof(lt)) {
|
||||
ltk = *iref.key;
|
||||
@@ -351,6 +333,23 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
{
|
||||
struct scoutfs_net_roots roots;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret == 0)
|
||||
ret = scoutfs_forest_read_items_roots(sb, &roots, key, bloom_key, start, end,
|
||||
cb, arg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the items are deltas then combine the src with the destination
|
||||
* value and store the result in the destination.
|
||||
@@ -541,9 +540,8 @@ void scoutfs_forest_dec_inode_count(struct super_block *sb)
|
||||
|
||||
/*
|
||||
* Return the total inode count from the super block and all the
|
||||
* log_btrees it references. This assumes it's working with a block
|
||||
* reference hierarchy that should be fully consistent. If we see
|
||||
* ESTALE we've hit persistent corruption.
|
||||
* log_btrees it references. ESTALE from read blocks is returned to the
|
||||
* caller who is expected to retry or return hard errors.
|
||||
*/
|
||||
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
|
||||
u64 *inode_count)
|
||||
@@ -572,8 +570,6 @@ int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_bloc
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
else if (ret == -ESTALE)
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -735,7 +731,8 @@ static void scoutfs_forest_log_merge_worker(struct work_struct *work)
|
||||
ret = scoutfs_btree_merge(sb, &alloc, &wri, &req.start, &req.end,
|
||||
&next, &comp.root, &inputs,
|
||||
!!(req.flags & cpu_to_le64(SCOUTFS_LOG_MERGE_REQUEST_SUBTREE)),
|
||||
SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT, 10);
|
||||
SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT, 10,
|
||||
(2 * 1024 * 1024));
|
||||
if (ret == -ERANGE) {
|
||||
comp.remain = next;
|
||||
le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_REMAIN);
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
struct scoutfs_alloc;
|
||||
struct scoutfs_block_writer;
|
||||
struct scoutfs_block;
|
||||
struct scoutfs_lock;
|
||||
|
||||
#include "btree.h"
|
||||
|
||||
@@ -23,6 +24,10 @@ int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg);
|
||||
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
|
||||
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg);
|
||||
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
|
||||
struct scoutfs_lock *lock);
|
||||
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);
|
||||
|
||||
@@ -8,9 +8,14 @@
|
||||
*/
|
||||
#define SCOUTFS_FORMAT_VERSION_MIN 1
|
||||
#define SCOUTFS_FORMAT_VERSION_MIN_STR __stringify(SCOUTFS_FORMAT_VERSION_MIN)
|
||||
#define SCOUTFS_FORMAT_VERSION_MAX 1
|
||||
#define SCOUTFS_FORMAT_VERSION_MAX 2
|
||||
#define SCOUTFS_FORMAT_VERSION_MAX_STR __stringify(SCOUTFS_FORMAT_VERSION_MAX)
|
||||
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_RETENTION 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_QUOTA 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG 2
|
||||
|
||||
/* statfs(2) f_type */
|
||||
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
|
||||
|
||||
@@ -175,6 +180,10 @@ struct scoutfs_key {
|
||||
#define sko_rid _sk_first
|
||||
#define sko_ino _sk_second
|
||||
|
||||
/* quota rules */
|
||||
#define skqr_hash _sk_second
|
||||
#define skqr_coll_nr _sk_third
|
||||
|
||||
/* xattr totl */
|
||||
#define skxt_a _sk_first
|
||||
#define skxt_b _sk_second
|
||||
@@ -461,7 +470,7 @@ struct scoutfs_srch_compact {
|
||||
* @get_trans_seq, @commit_trans_seq: These pair of sequence numbers
|
||||
* determine if a transaction is currently open for the mount that owns
|
||||
* the log_trees struct. get_trans_seq is advanced by the server as the
|
||||
* transaction is opened. The server sets comimt_trans_seq equal to
|
||||
* transaction is opened. The server sets commit_trans_seq equal to
|
||||
* get_ as the transaction is committed.
|
||||
*/
|
||||
struct scoutfs_log_trees {
|
||||
@@ -585,7 +594,9 @@ struct scoutfs_log_merge_freeing {
|
||||
*/
|
||||
#define SCOUTFS_INODE_INDEX_ZONE 4
|
||||
#define SCOUTFS_ORPHAN_ZONE 8
|
||||
#define SCOUTFS_QUOTA_ZONE 10
|
||||
#define SCOUTFS_XATTR_TOTL_ZONE 12
|
||||
#define SCOUTFS_XATTR_INDX_ZONE 14
|
||||
#define SCOUTFS_FS_ZONE 16
|
||||
#define SCOUTFS_LOCK_ZONE 20
|
||||
/* Items only stored in server btrees */
|
||||
@@ -608,6 +619,9 @@ struct scoutfs_log_merge_freeing {
|
||||
/* orphan zone, redundant type used for clarity */
|
||||
#define SCOUTFS_ORPHAN_TYPE 4
|
||||
|
||||
/* quota zone */
|
||||
#define SCOUTFS_QUOTA_RULE_TYPE 4
|
||||
|
||||
/* fs zone */
|
||||
#define SCOUTFS_INODE_TYPE 4
|
||||
#define SCOUTFS_XATTR_TYPE 8
|
||||
@@ -661,6 +675,34 @@ struct scoutfs_xattr_totl_val {
|
||||
__le64 count;
|
||||
};
|
||||
|
||||
#define SQ_RF_TOTL_COUNT (1 << 0)
|
||||
#define SQ_RF__UNKNOWN (~((1 << 1) - 1))
|
||||
|
||||
#define SQ_NS_LITERAL 0
|
||||
#define SQ_NS_PROJ 1
|
||||
#define SQ_NS_UID 2
|
||||
#define SQ_NS_GID 3
|
||||
#define SQ_NS__NR 4
|
||||
#define SQ_NS__NR_SELECT (SQ_NS__NR - 1) /* !literal */
|
||||
|
||||
#define SQ_NF_SELECT (1 << 0)
|
||||
#define SQ_NF__UNKNOWN (~((1 << 1) - 1))
|
||||
|
||||
#define SQ_OP_INODE 0
|
||||
#define SQ_OP_DATA 1
|
||||
#define SQ_OP__NR 2
|
||||
|
||||
struct scoutfs_quota_rule_val {
|
||||
__le64 name_val[3];
|
||||
__le64 limit;
|
||||
__u8 prio;
|
||||
__u8 op;
|
||||
__u8 rule_flags;
|
||||
__u8 name_source[3];
|
||||
__u8 name_flags[3];
|
||||
__u8 _pad[7];
|
||||
};
|
||||
|
||||
/* XXX does this exist upstream somewhere? */
|
||||
#define member_sizeof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
|
||||
|
||||
@@ -683,16 +725,19 @@ struct scoutfs_xattr_totl_val {
#define SCOUTFS_QUORUM_ELECT_VAR_MS 100

/*
 * Once a leader is elected they send out heartbeats at regular
 * intervals to force members to wait the much longer heartbeat timeout.
 * Once heartbeat timeout expires without receiving a heartbeat they'll
 * switch over the performing elections.
 * Once a leader is elected they send heartbeat messages to all quorum
 * members at regular intervals to force members to wait the much longer
 * heartbeat timeout. Once the heartbeat timeout expires without
 * receiving a heartbeat message a member will start an election.
 *
 * These determine how long it could take members to notice that a
 * leader has gone silent and start to elect a new leader.
 * leader has gone silent and start to elect a new leader. The
 * heartbeat timeout can be changed at run time by options.
 */
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_MIN_HB_TIMEO_MS (2 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_DEF_HB_TIMEO_MS (10 * MSEC_PER_SEC)
#define SCOUTFS_QUORUM_MAX_HB_TIMEO_MS (60 * MSEC_PER_SEC)
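
A simplified userspace-style sketch of the behaviour the comment above describes: a member waits up to the heartbeat timeout for a heartbeat message and starts an election when none arrives. This only illustrates the timing relationship between the interval and the timeout; the socket setup, message format, and the wait_for_silent_leader name are assumptions, not the quorum implementation.

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>

#define HB_TIMEO_MS (10 * 1000)	/* e.g. SCOUTFS_QUORUM_DEF_HB_TIMEO_MS */

/* wait for heartbeats on sock_fd; return once the timeout expires with none seen */
static void wait_for_silent_leader(int sock_fd)
{
	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
	char msg[128];
	int ret;

	for (;;) {
		ret = poll(&pfd, 1, HB_TIMEO_MS);
		if (ret < 0)
			return;
		if (ret == 0) {
			/* no heartbeat within the timeout: leader presumed gone */
			printf("heartbeat timeout, starting election\n");
			return;
		}
		if (pfd.revents & POLLIN) {
			/* consume the heartbeat and keep waiting */
			recv(sock_fd, msg, sizeof(msg), 0);
		}
	}
}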
|
||||
/*
|
||||
* A newly elected leader will give fencing some time before giving up and
|
||||
@@ -856,9 +901,38 @@ struct scoutfs_inode {
	struct scoutfs_timespec ctime;
	struct scoutfs_timespec mtime;
	struct scoutfs_timespec crtime;
	__le64 proj;
};

#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
#define SCOUTFS_INODE_FMT_V1_BYTES offsetof(struct scoutfs_inode, proj)

/*
 * There are so few versions that we don't mind doing this work inline
 * so that both utils and kernel can share these. Mounting has already
 * checked that the format version is within the supported min and max,
 * so these functions only deal with size variance within that band.
 */
/* Returns the native written inode size for the given format version, 0 for bad version */
static inline int scoutfs_inode_vers_bytes(__u64 fmt_vers)
{
	if (fmt_vers == 1)
		return SCOUTFS_INODE_FMT_V1_BYTES;
	else
		return sizeof(struct scoutfs_inode);
}
/*
 * Returns true if bytes is a valid inode size to read from the given
 * version. The given version must be greater than the version that
 * introduced the size.
 */
static inline int scoutfs_inode_valid_vers_bytes(__u64 fmt_vers, int bytes)
{
	return (bytes == sizeof(struct scoutfs_inode) && fmt_vers == SCOUTFS_FORMAT_VERSION_MAX) ||
	       (bytes == SCOUTFS_INODE_FMT_V1_BYTES);
}

#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
#define SCOUTFS_INO_FLAG_RETENTION 0x2
|
||||
|
||||
#define SCOUTFS_ROOT_INO 1
|
||||
|
||||
@@ -1017,7 +1091,8 @@ enum scoutfs_net_cmd {
|
||||
EXPAND_NET_ERRNO(ENOMEM) \
|
||||
EXPAND_NET_ERRNO(EIO) \
|
||||
EXPAND_NET_ERRNO(ENOSPC) \
|
||||
EXPAND_NET_ERRNO(EINVAL)
|
||||
EXPAND_NET_ERRNO(EINVAL) \
|
||||
EXPAND_NET_ERRNO(ENOLINK)
|
||||
|
||||
#undef EXPAND_NET_ERRNO
|
||||
#define EXPAND_NET_ERRNO(which) SCOUTFS_NET_ERR_##which,
|
||||
|
||||
kmod/src/inode.c: 926 lines changed (diff suppressed because it is too large)
@@ -21,8 +21,9 @@ struct scoutfs_inode_info {
|
||||
u64 data_version;
|
||||
u64 online_blocks;
|
||||
u64 offline_blocks;
|
||||
u64 proj;
|
||||
u32 flags;
|
||||
struct timespec crtime;
|
||||
struct kc_timespec crtime;
|
||||
|
||||
/*
|
||||
* Protects per-inode extent items, most particularly readers
|
||||
@@ -47,7 +48,7 @@ struct scoutfs_inode_info {
|
||||
atomic64_t last_refreshed;
|
||||
|
||||
/* initialized once for slab object */
|
||||
seqcount_t seqcount;
|
||||
seqlock_t seqlock;
|
||||
bool staging; /* holder of i_mutex is staging */
|
||||
struct scoutfs_per_task pt_data_lock;
|
||||
struct scoutfs_data_waitq data_waitq;
|
||||
@@ -56,14 +57,16 @@ struct scoutfs_inode_info {
|
||||
|
||||
struct scoutfs_lock_coverage ino_lock_cov;
|
||||
|
||||
/* drop if i_count hits 0, allows drop while invalidate holds coverage */
|
||||
bool drop_invalidated;
|
||||
struct llist_node iput_llnode;
|
||||
atomic_t iput_count;
|
||||
struct list_head iput_head;
|
||||
unsigned long iput_count;
|
||||
unsigned long iput_flags;
|
||||
|
||||
struct inode inode;
|
||||
};
|
||||
|
||||
/* try to prune dcache aliases with queued iput */
|
||||
#define SI_IPUT_FLAG_PRUNE (1 << 0)
|
||||
|
||||
static inline struct scoutfs_inode_info *SCOUTFS_I(struct inode *inode)
|
||||
{
|
||||
return container_of(inode, struct scoutfs_inode_info, inode);
|
||||
@@ -78,11 +81,15 @@ struct inode *scoutfs_alloc_inode(struct super_block *sb);
|
||||
void scoutfs_destroy_inode(struct inode *inode);
|
||||
int scoutfs_drop_inode(struct inode *inode);
|
||||
void scoutfs_evict_inode(struct inode *inode);
|
||||
void scoutfs_inode_queue_iput(struct inode *inode);
|
||||
void scoutfs_inode_queue_iput(struct inode *inode, unsigned long flags);
|
||||
|
||||
struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf);
|
||||
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);
|
||||
#define SCOUTFS_IGF_LINKED (1 << 0) /* enoent if nlink == 0 */
|
||||
struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf);
|
||||
struct inode *scoutfs_ilookup_nowait(struct super_block *sb, u64 ino);
|
||||
struct inode *scoutfs_ilookup_nowait_nonewfree(struct super_block *sb, u64 ino);
|
||||
|
||||
|
||||
void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino);
|
||||
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
|
||||
u32 minor, u64 ino);
|
||||
int scoutfs_inode_index_start(struct super_block *sb, u64 *seq);
|
||||
@@ -102,9 +109,8 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
|
||||
struct list_head *ind_locks);
|
||||
|
||||
int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret);
|
||||
struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
|
||||
umode_t mode, dev_t rdev, u64 ino,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
|
||||
u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret);
|
||||
|
||||
void scoutfs_inode_set_meta_seq(struct inode *inode);
|
||||
void scoutfs_inode_set_data_seq(struct inode *inode);
|
||||
@@ -115,16 +121,32 @@ u64 scoutfs_inode_meta_seq(struct inode *inode);
|
||||
u64 scoutfs_inode_data_seq(struct inode *inode);
|
||||
u64 scoutfs_inode_data_version(struct inode *inode);
|
||||
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off);
|
||||
u32 scoutfs_inode_get_flags(struct inode *inode);
|
||||
void scoutfs_inode_set_flags(struct inode *inode, u32 and, u32 or);
|
||||
u64 scoutfs_inode_get_proj(struct inode *inode);
|
||||
void scoutfs_inode_set_proj(struct inode *inode, u64 proj);
|
||||
|
||||
int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock);
|
||||
|
||||
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
|
||||
int flags);
|
||||
int scoutfs_inode_check_retention(struct inode *inode);
|
||||
|
||||
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock);
|
||||
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
||||
struct kstat *stat);
|
||||
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
|
||||
#else
|
||||
int scoutfs_getattr(KC_VFS_NS_DEF
|
||||
const struct path *path, struct kstat *stat,
|
||||
u32 request_mask, unsigned int query_flags);
|
||||
#endif
|
||||
int scoutfs_setattr(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, struct iattr *attr);
|
||||
|
||||
int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);
|
||||
int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock);
|
||||
int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
|
||||
struct scoutfs_lock *primary);
|
||||
int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
|
||||
struct scoutfs_lock *primary);
|
||||
void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb);
|
||||
|
||||
void scoutfs_inode_queue_writeback(struct inode *inode);
|
||||
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);
|
||||
|
||||
kmod/src/ioctl.c: 1094 lines changed (diff suppressed because it is too large)
kmod/src/ioctl.h: 323 lines changed
@@ -520,4 +520,327 @@ struct scoutfs_ioctl_xattr_total {
#define SCOUTFS_IOC_READ_XATTR_TOTALS \
	_IOW(SCOUTFS_IOCTL_MAGIC, 15, struct scoutfs_ioctl_read_xattr_totals)

/*
 * This fills the caller's inos array with inode numbers that are in use
 * after the start ino, within an internal inode group.
 *
 * This only makes a promise about the state of the inode numbers within
 * the first and last numbers returned by one call. At one time, all of
 * those inodes were still allocated. They could have changed before
 * the call returned. And any numbers outside of the first and last
 * (or single) are undefined.
 *
 * This doesn't iterate over all allocated inodes, it only probes a
 * single group that the start inode is within. This interface was
 * first introduced to support tests that needed to find out about a
 * specific inode, while having some other similarly niche uses. It is
 * unsuitable for a consistent iteration over all the inode numbers in
 * use.
 *
 * This test of inode items doesn't serialize with the inode lifetime
 * mechanism. It only tells you the numbers of inodes that were once
 * active in the system and haven't yet been fully deleted. The inode
 * numbers returned could have been in the process of being deleted and
 * were already unreachable even before the call started.
 *
 * @start_ino: the first inode number that could be returned
 * @inos_ptr: pointer to an aligned array of 64bit inode numbers
 * @inos_bytes: the number of bytes available in the inos_ptr array
 *
 * Returns errors or the count of inode numbers returned, quite possibly
 * including 0.
 */
struct scoutfs_ioctl_get_allocated_inos {
	__u64 start_ino;
	__u64 inos_ptr;
	__u64 inos_bytes;
};

#define SCOUTFS_IOC_GET_ALLOCATED_INOS \
	_IOW(SCOUTFS_IOCTL_MAGIC, 16, struct scoutfs_ioctl_get_allocated_inos)
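
The calling convention is easiest to see from userspace. A minimal sketch, assuming a local copy of this ioctl.h is available to include and that /mnt/scoutfs is a scoutfs mount; the buffer size and error handling are illustrative only.

/* sketch: probe one inode group for allocated inode numbers */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* assumed copy of the scoutfs ioctl.h shown above */

int main(int argc, char **argv)
{
	struct scoutfs_ioctl_get_allocated_inos gai;
	uint64_t inos[128] __attribute__((aligned(8)));
	int fd, nr, i;

	fd = open(argc > 1 ? argv[1] : "/mnt/scoutfs", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	gai.start_ino = 1;		/* first candidate inode number */
	gai.inos_ptr = (uintptr_t)inos;	/* aligned array of 64bit numbers */
	gai.inos_bytes = sizeof(inos);

	nr = ioctl(fd, SCOUTFS_IOC_GET_ALLOCATED_INOS, &gai);
	if (nr < 0) {
		perror("SCOUTFS_IOC_GET_ALLOCATED_INOS");
		close(fd);
		return 1;
	}

	for (i = 0; i < nr; i++)
		printf("ino %llu\n", (unsigned long long)inos[i]);

	close(fd);
	return 0;
}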
|
||||
/*
|
||||
* Get directory entries that refer to a specific inode.
|
||||
*
|
||||
* @ino: The target ino that we're finding referring entries to.
|
||||
* Constant across all the calls that make up an iteration over all the
|
||||
* inode's entries.
|
||||
*
|
||||
* @dir_ino: The inode number of a directory containing the entry to our
|
||||
* inode to search from. If this parent directory contains no more
|
||||
* entries to our inode then we'll search through other parent directory
|
||||
* inodes in inode order.
|
||||
*
|
||||
* @dir_pos: The position in the dir_ino parent directory of the entry
|
||||
* to our inode to search from. If there is no entry at this position
|
||||
* then we'll search through other entry positions in increasing order.
|
||||
* If we exhaust the parent directory then we'll search through
|
||||
* additional parent directories in inode order.
|
||||
*
|
||||
* @entries_ptr: A pointer to the buffer where found entries will be
|
||||
* stored. The pointer must be aligned to 16 bytes.
|
||||
*
|
||||
* @entries_bytes: The size of the buffer that will contain entries.
|
||||
*
|
||||
* To start iterating set the desired target ino, dir_ino to 0, dir_pos
|
||||
* to 0, and set result_ptr and _bytes to a sufficiently large buffer.
|
||||
* Each entry struct that's stored in the buffer adds some overhead so a
|
||||
* large multiple of the largest possible name is a reasonable choice.
|
||||
* (A few multiples of PATH_MAX perhaps.)
|
||||
*
|
||||
* Each call returns the total number of entries that were stored in the
|
||||
* entries buffer. Zero is returned when the search was successful and
|
||||
* no referring entries were found. The entries can be iterated over by
|
||||
* advancing each starting struct offset by the total number of bytes in
|
||||
* each entry. If the _LAST flag is set on an entry then there were no
|
||||
* more entries referring to the inode at the time of the call and
|
||||
* iteration can be stopped.
|
||||
*
|
||||
* To resume iteration set the next call's starting dir_ino and dir_pos
|
||||
* to one past the last entry seen. Increment the last entry's dir_pos,
|
||||
* and if it wrapped to 0, increment its dir_ino.
|
||||
*
|
||||
* This does not check that the caller has permission to read the
|
||||
* entries found in each containing directory. It requires
|
||||
* CAP_DAC_READ_SEARCH which bypasses path traversal permissions
|
||||
* checking.
|
||||
*
|
||||
* Entries returned by a single call can reflect any combination of
|
||||
* racing creation and removal of entries. Each entry existed at the
|
||||
* time it was read though it may have changed in the time it took to
|
||||
* return from the call. The set of entries returned may no longer
|
||||
* reflect the current set of entries and may not have existed at the
|
||||
* same time.
|
||||
*
|
||||
* This has no knowledge of the life cycle of the inode. It can return
|
||||
* 0 when there are no referring entries because either the target inode
|
||||
* doesn't exist, it is in the process of being deleted, or because it
|
||||
* is still open while being unlinked.
|
||||
*
|
||||
* On success this returns the number of entries filled in the buffer.
|
||||
* A return of 0 indicates that no entries referred to the inode.
|
||||
*
|
||||
* EINVAL is returned when there is a problem with the buffer. Either
|
||||
* it was not aligned or it was not large enough for the first entry.
|
||||
*
|
||||
* Many other errnos indicate hard failure to find the next entry.
|
||||
*/
|
||||
struct scoutfs_ioctl_get_referring_entries {
|
||||
__u64 ino;
|
||||
__u64 dir_ino;
|
||||
__u64 dir_pos;
|
||||
__u64 entries_ptr;
|
||||
__u64 entries_bytes;
|
||||
};
|
||||
|
||||
/*
|
||||
* @dir_ino: The inode of the directory containing the entry.
|
||||
*
|
||||
* @dir_pos: The readdir f_pos position of the entry within the
|
||||
* directory.
|
||||
*
|
||||
* @ino: The inode number of the target of the entry.
|
||||
*
|
||||
* @flags: Flags associated with this entry.
|
||||
*
|
||||
* @d_type: Inode type as specified with DT_ enum values in readdir(3).
|
||||
*
|
||||
* @entry_bytes: The total bytes taken by the entry in memory, including
|
||||
* the name and any alignment padding. The start of a following entry
|
||||
* will be found after this number of bytes.
|
||||
*
|
||||
* @name_len: The number of bytes in the name not including the trailing
|
||||
* null, ala strlen(3).
|
||||
*
|
||||
* @name: The null terminated name of the referring entry. In the
|
||||
* struct definition this array is sized to naturally align the struct.
|
||||
* That number of padded bytes are not necessarily found in the buffer
|
||||
* returned by _get_referring_entries;
|
||||
*/
|
||||
struct scoutfs_ioctl_dirent {
|
||||
__u64 dir_ino;
|
||||
__u64 dir_pos;
|
||||
__u64 ino;
|
||||
__u16 entry_bytes;
|
||||
__u8 flags;
|
||||
__u8 d_type;
|
||||
__u8 name_len;
|
||||
__u8 name[3];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOCTL_DIRENT_FLAG_LAST (1 << 0)
|
||||
|
||||
#define SCOUTFS_IOC_GET_REFERRING_ENTRIES \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 17, struct scoutfs_ioctl_get_referring_entries)
|
||||
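
A hedged userspace sketch of the iteration described above: start with dir_ino and dir_pos of 0, walk entries by entry_bytes, resume from one past the last entry seen, and stop at the _LAST flag. The header path, mount point, buffer size, and the print_referring_entries name are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* assumed copy of the scoutfs ioctl.h shown above */

/* print every entry that refers to ino, following the documented resume rule */
static int print_referring_entries(int fd, uint64_t ino)
{
	static char buf[16 * 4096] __attribute__((aligned(16)));
	struct scoutfs_ioctl_get_referring_entries gre = {
		.ino = ino,
		.dir_ino = 0,
		.dir_pos = 0,
		.entries_ptr = (uintptr_t)buf,
		.entries_bytes = sizeof(buf),
	};
	struct scoutfs_ioctl_dirent *dent;
	int last = 0;
	int nr, i;

	while (!last) {
		nr = ioctl(fd, SCOUTFS_IOC_GET_REFERRING_ENTRIES, &gre);
		if (nr < 0)
			return -1;
		if (nr == 0)
			break;	/* no (more) referring entries */

		dent = (struct scoutfs_ioctl_dirent *)buf;
		for (i = 0; i < nr; i++) {
			printf("dir %llu pos %llu -> %s\n",
			       (unsigned long long)dent->dir_ino,
			       (unsigned long long)dent->dir_pos,
			       (const char *)dent->name);
			last = dent->flags & SCOUTFS_IOCTL_DIRENT_FLAG_LAST;
			/* resume from one past the last entry we saw */
			gre.dir_ino = dent->dir_ino;
			gre.dir_pos = dent->dir_pos + 1;
			if (gre.dir_pos == 0)
				gre.dir_ino++;
			dent = (void *)((char *)dent + dent->entry_bytes);
		}
	}

	return 0;
}

int main(int argc, char **argv)
{
	int fd = open("/mnt/scoutfs", O_RDONLY);	/* assumed mount point */

	if (fd < 0 || argc < 2)
		return 1;
	return print_referring_entries(fd, strtoull(argv[1], NULL, 0)) ? 1 : 0;
}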
|
||||
struct scoutfs_ioctl_inode_attr_x {
|
||||
__u64 x_mask;
|
||||
__u64 x_flags;
|
||||
__u64 meta_seq;
|
||||
__u64 data_seq;
|
||||
__u64 data_version;
|
||||
__u64 online_blocks;
|
||||
__u64 offline_blocks;
|
||||
__u64 ctime_sec;
|
||||
__u32 ctime_nsec;
|
||||
__u32 crtime_nsec;
|
||||
__u64 crtime_sec;
|
||||
__u64 size;
|
||||
__u64 bits;
|
||||
__u64 project_id;
|
||||
};
|
||||
|
||||
/*
|
||||
* Behavioral flags set in the x_flags field. These flags don't
|
||||
* necessarily correspond to specific attributes, but instead change the
|
||||
* behaviour of a _get_ or _set_ operation.
|
||||
*
|
||||
* @SCOUTFS_IOC_IAX_F_SIZE_OFFLINE: When setting i_size, also create
|
||||
* extents which are marked offline for the region of the file from
|
||||
* offset 0 to the new set size. This can only be set when setting the
|
||||
* size and has no effect if setting the size fails.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_F_SIZE_OFFLINE (1ULL << 0)
|
||||
#define SCOUTFS_IOC_IAX_F__UNKNOWN (U64_MAX << 1)
|
||||
|
||||
/*
|
||||
* Single-bit values stored in the @bits field. These indicate whether
|
||||
* the bit is set, or not. The main _IAX_ bits set in the mask indicate
|
||||
* whether this value bit is populated by _get or stored by _set.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_B_RETENTION (1ULL << 0)
|
||||
|
||||
/*
|
||||
* x_mask bits which indicate which attributes of the inode to populate
|
||||
* on return for _get or to set on the inode for _set. Each mask bit
|
||||
* corresponds to the matching named field in the attr_x struct passed
|
||||
* to the _get_ and _set_ calls.
|
||||
*
|
||||
* Each field can have different permissions or other attribute
|
||||
* requirements which can cause calls to fail. If _set_ fails then no
|
||||
* other attribute changes will have been made by the same call.
|
||||
*
|
||||
* @SCOUTFS_IOC_IAX_RETENTION: Mark a file for retention. When marked,
|
||||
* no modification can be made to the file other than changing extended
|
||||
* attributes outside the "user." prefix and clearing the retention
|
||||
* mark. This can only be set on regular files and requires root (the
|
||||
* CAP_SYS_ADMIN capability). Other attributes can be set with a
|
||||
* set_attr_x call on a retention inode as long as that call also
|
||||
* successfully clears the retention mark.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_META_SEQ (1ULL << 0)
|
||||
#define SCOUTFS_IOC_IAX_DATA_SEQ (1ULL << 1)
|
||||
#define SCOUTFS_IOC_IAX_DATA_VERSION (1ULL << 2)
|
||||
#define SCOUTFS_IOC_IAX_ONLINE_BLOCKS (1ULL << 3)
|
||||
#define SCOUTFS_IOC_IAX_OFFLINE_BLOCKS (1ULL << 4)
|
||||
#define SCOUTFS_IOC_IAX_CTIME (1ULL << 5)
|
||||
#define SCOUTFS_IOC_IAX_CRTIME (1ULL << 6)
|
||||
#define SCOUTFS_IOC_IAX_SIZE (1ULL << 7)
|
||||
#define SCOUTFS_IOC_IAX_RETENTION (1ULL << 8)
|
||||
#define SCOUTFS_IOC_IAX_PROJECT_ID (1ULL << 9)
|
||||
|
||||
/* single bit attributes that are packed in the bits field as _B_ */
|
||||
#define SCOUTFS_IOC_IAX__BITS (SCOUTFS_IOC_IAX_RETENTION)
|
||||
/* inverse of all the bits we understand */
|
||||
#define SCOUTFS_IOC_IAX__UNKNOWN (U64_MAX << 10)
|
||||
|
||||
#define SCOUTFS_IOC_GET_ATTR_X \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 18, struct scoutfs_ioctl_inode_attr_x)
|
||||
|
||||
#define SCOUTFS_IOC_SET_ATTR_X \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 19, struct scoutfs_ioctl_inode_attr_x)
|
||||
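
As a rough illustration of the mask/bits split, a userspace caller might query the data version and retention bit of a file like this. The header path and the assumption that _get_ returns 0 on success are mine; only the struct fields and flag names come from the interface above.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* assumed copy of the scoutfs ioctl.h shown above */

int main(int argc, char **argv)
{
	struct scoutfs_ioctl_inode_attr_x iax;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&iax, 0, sizeof(iax));
	/* ask _get_ to populate data_version and the retention bit */
	iax.x_mask = SCOUTFS_IOC_IAX_DATA_VERSION | SCOUTFS_IOC_IAX_RETENTION;

	if (ioctl(fd, SCOUTFS_IOC_GET_ATTR_X, &iax) < 0) {
		perror("SCOUTFS_IOC_GET_ATTR_X");
		close(fd);
		return 1;
	}

	printf("data_version %llu retention %s\n",
	       (unsigned long long)iax.data_version,
	       (iax.bits & SCOUTFS_IOC_IAX_B_RETENTION) ? "set" : "clear");

	close(fd);
	return 0;
}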
|
||||
/*
|
||||
* (These fields are documented in the order that they're displayed by
|
||||
* the scoutfs cli utility which matches the sort order of the rules.)
|
||||
*
|
||||
* @prio: The priority of the rule. Rules are sorted by their fields
|
||||
* with prio at the highest magnitude. When multiple rules match the
|
||||
* rule with the highest sort order is enforced. The priority field
|
||||
* lets rules override the default field sort order.
|
||||
*
|
||||
* @name_val[3]: The three 64bit values that make up the name of the
|
||||
* totl xattr whose total will be checked against the rule's limit to
|
||||
* see if the quota rule has been exceeded. The behavior of the values
|
||||
* can be changed by their corresponding name_source and name_flags.
|
||||
*
|
||||
* @name_source[3]: The SQ_NS_ enums that control where the value comes
|
||||
* from. _LITERAL uses the value from name_val. Inode attribute
|
||||
* sources (_PROJ, _UID, _GID) are taken from the inode of the operation
|
||||
* that is being checked against the rule.
|
||||
*
|
||||
* @name_flags[3]: The SQ_NF_ enums that alter the name values. _SELECT
|
||||
* makes the rule only match if the inode attribute of the operation
|
||||
* matches the attribute value stored in name_val. This lets rules
|
||||
 * match a specific value of an attribute rather than mapping all
 * attribute values to totl names.
|
||||
*
|
||||
* @op: The SQ_OP_ enums which specify the operation that can't exceed
|
||||
* the rule's limit. _INODE checks inode creation and the inode
|
||||
* attributes are taken from the inode that would be created. _DATA
|
||||
* checks file data block allocation and the inode fields come from the
|
||||
* inode that is allocating the blocks.
|
||||
*
|
||||
* @limit: The 64bit value that is checked against the totl value
|
||||
* described by the rule. If the totl value is greater than or equal to
|
||||
* this value of the matching rule then the operation will return
|
||||
* -EDQUOT.
|
||||
*
|
||||
* @rule_flags: SQ_RF_TOTL_COUNT indicates that the rule's limit should
|
||||
* be checked against the number of xattrs contributing to a totl value
|
||||
* instead of the sum of the xattrs.
|
||||
*/
|
||||
struct scoutfs_ioctl_quota_rule {
|
||||
__u64 name_val[3];
|
||||
__u64 limit;
|
||||
__u8 prio;
|
||||
__u8 op;
|
||||
__u8 rule_flags;
|
||||
__u8 name_source[3];
|
||||
__u8 name_flags[3];
|
||||
__u8 _pad[7];
|
||||
};
|
||||
|
||||
struct scoutfs_ioctl_get_quota_rules {
|
||||
__u64 iterator[2];
|
||||
__u64 rules_ptr;
|
||||
__u64 rules_nr;
|
||||
};
|
||||
|
||||
/*
|
||||
* Rules are uniquely identified by their non-padded fields. Addition will fail
|
||||
* with -EEXIST if the specified rule already exists and deletion must find a rule
|
||||
* with all matching fields to delete.
|
||||
*/
|
||||
#define SCOUTFS_IOC_GET_QUOTA_RULES \
|
||||
_IOR(SCOUTFS_IOCTL_MAGIC, 20, struct scoutfs_ioctl_get_quota_rules)
|
||||
#define SCOUTFS_IOC_ADD_QUOTA_RULE \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 21, struct scoutfs_ioctl_quota_rule)
|
||||
#define SCOUTFS_IOC_DEL_QUOTA_RULE \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 22, struct scoutfs_ioctl_quota_rule)
|
||||
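
To make the fields concrete, here is a hedged sketch of adding one rule: a priority-0 data-allocation limit whose totl name takes its third value from the inode's uid, with the first two name values as literal tags. The literal tag values, the limit, the mount point, and the idea that format.h's SQ_ defines are visible to userspace are all assumptions for illustration.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "format.h"	/* assumed to provide the SQ_ enums referenced above */
#include "ioctl.h"	/* assumed copy of the scoutfs ioctl.h shown above */

int main(void)
{
	struct scoutfs_ioctl_quota_rule rule;
	int fd;

	fd = open("/mnt/scoutfs", O_RDONLY);	/* assumed mount point */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rule, 0, sizeof(rule));
	rule.prio = 0;
	rule.op = SQ_OP_DATA;		/* checked at file data block allocation */
	rule.limit = 1024 * 1024;	/* limit on the matching totl value */

	/* first two name values are literal tags, third comes from the inode's uid */
	rule.name_val[0] = 1;		/* made-up literal tag value */
	rule.name_val[1] = 0;
	rule.name_source[0] = SQ_NS_LITERAL;
	rule.name_source[1] = SQ_NS_LITERAL;
	rule.name_source[2] = SQ_NS_UID;

	if (ioctl(fd, SCOUTFS_IOC_ADD_QUOTA_RULE, &rule) < 0) {
		perror("SCOUTFS_IOC_ADD_QUOTA_RULE");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}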
|
||||
/*
|
||||
* Inodes can be indexed in a global key space at a position determined
|
||||
* by a .indx. tagged xattr. The xattr name specifies the two index
|
||||
* position values, with major having the more significant comparison
|
||||
* order.
|
||||
*/
|
||||
struct scoutfs_ioctl_xattr_index_entry {
|
||||
__u64 minor;
|
||||
__u64 ino;
|
||||
__u8 major;
|
||||
__u8 _pad[7];
|
||||
};
|
||||
|
||||
struct scoutfs_ioctl_read_xattr_index {
|
||||
__u64 flags;
|
||||
struct scoutfs_ioctl_xattr_index_entry first;
|
||||
struct scoutfs_ioctl_xattr_index_entry last;
|
||||
__u64 entries_ptr;
|
||||
__u64 entries_nr;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_READ_XATTR_INDEX \
|
||||
_IOR(SCOUTFS_IOCTL_MAGIC, 23, struct scoutfs_ioctl_read_xattr_index)
|
||||
|
||||
#endif
|
||||
|
||||
kmod/src/item.c: 389 lines changed
@@ -24,9 +24,11 @@
|
||||
#include "item.h"
|
||||
#include "forest.h"
|
||||
#include "block.h"
|
||||
#include "msg.h"
|
||||
#include "trans.h"
|
||||
#include "counters.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "util.h"
|
||||
|
||||
/*
|
||||
* The item cache maintains a consistent view of items that are read
|
||||
@@ -76,8 +78,10 @@ struct item_cache_info {
|
||||
/* almost always read, barely written */
|
||||
struct super_block *sb;
|
||||
struct item_percpu_pages __percpu *pcpu_pages;
|
||||
struct shrinker shrinker;
|
||||
KC_DEFINE_SHRINKER(shrinker);
|
||||
#ifdef KC_CPU_NOTIFIER
|
||||
struct notifier_block notifier;
|
||||
#endif
|
||||
|
||||
/* often walked, but per-cpu refs are fast path */
|
||||
rwlock_t rwlock;
|
||||
@@ -93,9 +97,8 @@ struct item_cache_info {
|
||||
struct list_head lru_list;
|
||||
unsigned long lru_pages;
|
||||
|
||||
/* written by page readers, read by shrink */
|
||||
spinlock_t active_lock;
|
||||
struct list_head active_list;
|
||||
/* stop readers from caching stale items behind reclaimed cleaned written items */
|
||||
atomic64_t read_dirty_barrier;
|
||||
};
|
||||
|
||||
#define DECLARE_ITEM_CACHE_INFO(sb, name) \
|
||||
@@ -685,6 +688,12 @@ static void erase_page_items(struct cached_page *pg,
|
||||
* to the dirty list after the left page, and by adding items to the
|
||||
* tail of right's dirty list in key sort order.
|
||||
*
|
||||
* The max_seq of the source page might be larger than all the items
|
||||
* while protecting an erased item from being reclaimed while an older
|
||||
* read is in flight. We don't know where it might be in the source
|
||||
* page so we have to assume that it's in the key range being moved and
|
||||
* update the destination page's max_seq accordingly.
|
||||
*
|
||||
* The caller is responsible for page locking and managing the lru.
|
||||
*/
|
||||
static void move_page_items(struct super_block *sb,
|
||||
@@ -726,6 +735,9 @@ static void move_page_items(struct super_block *sb,
|
||||
|
||||
erase_item(left, from);
|
||||
}
|
||||
|
||||
if (left->max_seq > right->max_seq)
|
||||
right->max_seq = left->max_seq;
|
||||
}
|
||||
|
||||
enum page_intersection_type {
|
||||
@@ -1272,78 +1284,6 @@ static int cache_empty_page(struct super_block *sb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Readers operate independently from dirty items and transactions.
|
||||
* They read a set of persistent items and insert them into the cache
|
||||
* when there aren't already pages whose key range contains the items.
|
||||
* This naturally prefers cached dirty items over stale read items.
|
||||
*
|
||||
* We have to deal with the case where dirty items are written and
|
||||
* invalidated while a read is in flight. The reader won't have seen
|
||||
* the items that were dirty in their persistent roots as they started
|
||||
* reading. By the time they insert their read pages the previously
|
||||
* dirty items have been reclaimed and are not in the cache. The old
|
||||
* stale items will be inserted in their place, effectively corrupting
|
||||
* by having the dirty items disappear.
|
||||
*
|
||||
* We fix this by tracking the max seq of items in pages. As readers
|
||||
* start they record the current transaction seq. Invalidation skips
|
||||
* pages with a max seq greater than the first reader seq because the
|
||||
* items in the page have to stick around to prevent the readers stale
|
||||
* items from being inserted.
|
||||
*
|
||||
* This naturally only affects a small set of pages with items that were
|
||||
* written relatively recently. If we're in memory pressure then we
|
||||
* probably have a lot of pages and they'll naturally have items that
|
||||
* were visible to any raders. We don't bother with the complicated and
|
||||
* expensive further refinement of tracking the ranges that are being
|
||||
* read and comparing those with pages to invalidate.
|
||||
*/
|
||||
struct active_reader {
|
||||
struct list_head head;
|
||||
u64 seq;
|
||||
};
|
||||
|
||||
#define INIT_ACTIVE_READER(rdr) \
|
||||
struct active_reader rdr = { .head = LIST_HEAD_INIT(rdr.head) }
|
||||
|
||||
static void add_active_reader(struct super_block *sb, struct active_reader *active)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
|
||||
BUG_ON(!list_empty(&active->head));
|
||||
|
||||
active->seq = scoutfs_trans_sample_seq(sb);
|
||||
|
||||
spin_lock(&cinf->active_lock);
|
||||
list_add_tail(&active->head, &cinf->active_list);
|
||||
spin_unlock(&cinf->active_lock);
|
||||
}
|
||||
|
||||
static u64 first_active_reader_seq(struct item_cache_info *cinf)
|
||||
{
|
||||
struct active_reader *active;
|
||||
u64 first;
|
||||
|
||||
/* only the calling task adds or deletes this active */
|
||||
spin_lock(&cinf->active_lock);
|
||||
active = list_first_entry_or_null(&cinf->active_list, struct active_reader, head);
|
||||
first = active ? active->seq : U64_MAX;
|
||||
spin_unlock(&cinf->active_lock);
|
||||
|
||||
return first;
|
||||
}
|
||||
|
||||
static void del_active_reader(struct item_cache_info *cinf, struct active_reader *active)
|
||||
{
|
||||
/* only the calling task adds or deletes this active */
|
||||
if (!list_empty(&active->head)) {
|
||||
spin_lock(&cinf->active_lock);
|
||||
list_del_init(&active->head);
|
||||
spin_unlock(&cinf->active_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a newly read item to the pages that we're assembling for
|
||||
* insertion into the cache. These pages are private, they only exist
|
||||
@@ -1437,24 +1377,34 @@ static int read_page_item(struct super_block *sb, struct scoutfs_key *key, u64 s
|
||||
* and duplicates, we insert any resulting pages which don't overlap
|
||||
* with existing cached pages.
|
||||
*
|
||||
* We only insert uncached regions because this is called with cluster
|
||||
* locks held, but without locking the cache. The regions we read can
|
||||
* be stale with respect to the current cache, which can be read and
|
||||
* dirtied by other cluster lock holders on our node, but the cluster
|
||||
* locks protect the stable items we read. Invalidation is careful not
|
||||
* to drop pages that have items that we couldn't see because they were
|
||||
* dirty when we started reading.
|
||||
*
|
||||
* The forest item reader is reading stable trees that could be
|
||||
* overwritten. It can return -ESTALE which we return to the caller who
|
||||
* will retry the operation and work with a new set of more recent
|
||||
* btrees.
|
||||
*
|
||||
* We only insert uncached regions because this is called with cluster
|
||||
* locks held, but without locking the cache. The regions we read can
|
||||
* be stale with respect to the current cache, which can be read and
|
||||
* dirtied by other cluster lock holders on our node, but the cluster
|
||||
* locks protect the stable items we read.
|
||||
*
|
||||
* Using the presence of locally written dirty pages to override stale
|
||||
* read pages only works if, well, the more recent locally written pages
|
||||
* are still present. Readers are totally decoupled from writers and
|
||||
* can have a set of items that is very old indeed. In the mean time
|
||||
* more recent items would have been dirtied locally, committed,
|
||||
* cleaned, and reclaimed. We have a coarse barrier which ensures that
|
||||
* readers can't insert items read from old roots from before local data
|
||||
* was written. If a write completes while a read is in progress the
|
||||
* read will have to retry. The retried read can use cached blocks so
|
||||
* we're relying on reads being much faster than writes to reduce the
|
||||
* overhead to mostly cpu work of recollecting the items from cached
|
||||
* blocks via a more recent root from the server.
|
||||
*/
|
||||
static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
struct scoutfs_key *key, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct rb_root root = RB_ROOT;
|
||||
INIT_ACTIVE_READER(active);
|
||||
struct cached_page *right = NULL;
|
||||
struct cached_page *pg;
|
||||
struct cached_page *rd;
|
||||
@@ -1467,6 +1417,7 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
struct rb_node *par;
|
||||
struct rb_node *pg_tmp;
|
||||
struct rb_node *item_tmp;
|
||||
u64 rdbar;
|
||||
int pgi;
|
||||
int ret;
|
||||
|
||||
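
The read_dirty_barrier scheme reads like a sequence check: sample a counter before doing the slow read, and discard and retry the result if a writer bumped the counter in the meantime. A minimal standalone sketch with C11 atomics, illustrative only and not the item cache code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong read_dirty_barrier;

/* writers bump the barrier after cleaning dirty items */
static void write_done(void)
{
	atomic_fetch_add(&read_dirty_barrier, 1);
}

/* readers retry if a write completed while they were reading */
static int read_with_barrier(void)
{
	unsigned long sampled;
	int attempts = 0;

	do {
		sampled = atomic_load(&read_dirty_barrier);
		/* ... slow read of persistent items happens here ... */
		attempts++;
	} while (atomic_load(&read_dirty_barrier) != sampled);

	return attempts;
}

int main(void)
{
	printf("read finished after %d attempt(s)\n", read_with_barrier());
	write_done();
	return 0;
}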
@@ -1480,8 +1431,7 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
pg->end = lock->end;
|
||||
rbtree_insert(&pg->node, NULL, &root.rb_node, &root);
|
||||
|
||||
/* set active reader seq before reading persistent roots */
|
||||
add_active_reader(sb, &active);
|
||||
rdbar = atomic64_read(&cinf->read_dirty_barrier);
|
||||
|
||||
start = lock->start;
|
||||
end = lock->end;
|
||||
@@ -1520,11 +1470,19 @@ static int read_pages(struct super_block *sb, struct item_cache_info *cinf,
|
||||
retry:
|
||||
write_lock(&cinf->rwlock);
|
||||
|
||||
ret = 0;
|
||||
while ((rd = first_page(&root))) {
|
||||
|
||||
pg = page_rbtree_walk(sb, &cinf->pg_root, &rd->start, &rd->end,
|
||||
NULL, NULL, &par, &pnode);
|
||||
if (!pg) {
|
||||
/* can't insert if write is cleaning (write_lock is read barrier) */
|
||||
if (atomic64_read(&cinf->read_dirty_barrier) != rdbar) {
|
||||
scoutfs_inc_counter(sb, item_read_pages_barrier);
|
||||
ret = -ESTALE;
|
||||
break;
|
||||
}
|
||||
|
||||
/* insert read pages that don't intersect */
|
||||
rbtree_erase(&rd->node, &root);
|
||||
rbtree_insert(&rd->node, par, pnode, &cinf->pg_root);
|
||||
@@ -1559,10 +1517,7 @@ retry:
|
||||
|
||||
write_unlock(&cinf->rwlock);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
del_active_reader(cinf, &active);
|
||||
|
||||
/* free any pages we left dangling on error */
|
||||
for_each_page_safe(&root, rd, pg_tmp) {
|
||||
rbtree_erase(&rd->node, &root);
|
||||
@@ -1622,6 +1577,7 @@ retry:
|
||||
ret = read_pages(sb, cinf, key, lock);
|
||||
if (ret < 0 && ret != -ESTALE)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, item_read_pages_retry);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@@ -1658,10 +1614,29 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lock_safe(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
static int lock_safe(struct super_block *sb, struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
int mode)
|
||||
{
|
||||
if (WARN_ON_ONCE(!scoutfs_lock_protected(lock, key, mode)))
|
||||
bool prot = scoutfs_lock_protected(lock, key, mode);
|
||||
|
||||
if (!prot) {
|
||||
static bool once = false;
|
||||
if (!once) {
|
||||
scoutfs_err(sb, "lock (start "SK_FMT" end "SK_FMT" mode 0x%x) does not protect operation (key "SK_FMT" mode 0x%x)",
|
||||
SK_ARG(&lock->start), SK_ARG(&lock->end), lock->mode,
|
||||
SK_ARG(key), mode);
|
||||
dump_stack();
|
||||
once = true;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int optional_lock_mode_match(struct scoutfs_lock *lock, int mode)
|
||||
{
|
||||
if (WARN_ON_ONCE(lock && lock->mode != mode))
|
||||
return -EINVAL;
|
||||
else
|
||||
return 0;
|
||||
@@ -1688,8 +1663,8 @@ static int copy_val(void *dst, int dst_len, void *src, int src_len)
|
||||
* The amount of bytes copied is returned which can be 0 or truncated if
|
||||
* the caller's buffer isn't big enough.
|
||||
*/
|
||||
int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
static int item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, int len_limit, struct scoutfs_lock *lock)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
struct cached_item *item;
|
||||
@@ -1698,7 +1673,7 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_lookup);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
|
||||
goto out;
|
||||
|
||||
ret = get_cached_page(sb, cinf, lock, key, false, false, 0, &pg);
|
||||
@@ -1709,6 +1684,8 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
item = item_rbtree_walk(&pg->item_root, key, NULL, NULL, NULL);
|
||||
if (!item || item->deletion)
|
||||
ret = -ENOENT;
|
||||
else if (len_limit > 0 && item->val_len > len_limit)
|
||||
ret = -EIO;
|
||||
else
|
||||
ret = copy_val(val, val_len, item->val, item->val_len);
|
||||
|
||||
@@ -1717,13 +1694,38 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock)
{
	return item_lookup(sb, key, val, val_len, 0, lock);
}

/*
 * Copy an item's value into the caller's buffer. If the item's value
 * is larger than the caller's buffer then -EIO is returned. If the
 * item is smaller then the bytes from the end of the copied value to
 * the end of the buffer are zeroed. The number of value bytes copied
 * is returned, and 0 can be returned for an item with no value.
 */
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
				     void *val, int val_len, struct scoutfs_lock *lock)
{
	int ret;

	ret = item_lookup(sb, key, val, val_len, val_len, lock);
	if (ret >= 0 && ret < val_len)
		memset(val + ret, 0, val_len - ret);

	return ret;
}
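
The copy-then-zero behaviour is easy to demonstrate outside the kernel. A small standalone sketch of the same semantics, with hypothetical names rather than scoutfs code: copy at most the stored value, fail if it is larger than the buffer, and zero the tail when it is smaller.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/*
 * Mirror of the lookup_smaller_zero semantics: returns the number of
 * value bytes copied, -EIO if the stored value is larger than the
 * buffer, and zeroes any remaining buffer bytes past the copied value.
 */
static int copy_val_smaller_zero(void *buf, int buf_len, const void *val, int val_len)
{
	if (val_len > buf_len)
		return -EIO;
	memcpy(buf, val, val_len);
	if (val_len < buf_len)
		memset((char *)buf + val_len, 0, buf_len - val_len);
	return val_len;
}

int main(void)
{
	char small_val[3] = { 1, 2, 3 };
	char buf[8];
	int ret;

	ret = copy_val_smaller_zero(buf, sizeof(buf), small_val, sizeof(small_val));
	printf("copied %d bytes, tail byte %d\n", ret, buf[7]);	/* 3 bytes, tail 0 */
	return 0;
}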
|
||||
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_item_lookup(sb, key, val, val_len, lock);
|
||||
ret = item_lookup(sb, key, val, val_len, 0, lock);
|
||||
if (ret == val_len)
|
||||
ret = 0;
|
||||
else if (ret >= 0)
|
||||
@@ -1773,7 +1775,7 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key *key,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
|
||||
goto out;
|
||||
|
||||
pos = *key;
|
||||
@@ -1823,12 +1825,19 @@ out:
|
||||
* also increase the seqs. It lets us limit the inputs of item merging
|
||||
* to the last stable seq and ensure that all the items in open
|
||||
* transactions and granted locks will have greater seqs.
|
||||
*
|
||||
* This is a little awkward for WRITE_ONLY locks which can have much
|
||||
* older versions than the version of locked primary data that they're
|
||||
* operating on behalf of. Callers can optionally provide that primary
|
||||
* lock to get the version from. This ensures that items created under
|
||||
* WRITE_ONLY locks can not have versions less than their primary data.
|
||||
*/
|
||||
static u64 item_seq(struct super_block *sb, struct scoutfs_lock *lock)
|
||||
static u64 item_seq(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
struct scoutfs_lock *primary)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
return max(sbi->trans_seq, lock->write_seq);
|
||||
return max3(sbi->trans_seq, lock->write_seq, primary ? primary->write_seq : 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1847,7 +1856,7 @@ int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_dirty);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -1863,7 +1872,7 @@ int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key *key,
|
||||
if (!item || item->deletion) {
|
||||
ret = -ENOENT;
|
||||
} else {
|
||||
item->seq = item_seq(sb, lock);
|
||||
item->seq = item_seq(sb, lock, NULL);
|
||||
mark_item_dirty(sb, cinf, pg, NULL, item);
|
||||
ret = 0;
|
||||
}
|
||||
@@ -1880,10 +1889,10 @@ out:
|
||||
*/
|
||||
static int item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock,
|
||||
int mode, bool force)
|
||||
struct scoutfs_lock *primary, int mode, bool force)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
const u64 seq = item_seq(sb, lock);
|
||||
const u64 seq = item_seq(sb, lock, primary);
|
||||
struct cached_item *found;
|
||||
struct cached_item *item;
|
||||
struct cached_page *pg;
|
||||
@@ -1893,7 +1902,8 @@ static int item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_create);
|
||||
|
||||
if ((ret = lock_safe(lock, key, mode)))
|
||||
if ((ret = lock_safe(sb, lock, key, mode)) ||
|
||||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -1934,15 +1944,15 @@ out:
|
||||
int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
return item_create(sb, key, val, val_len, lock,
|
||||
SCOUTFS_LOCK_READ, false);
|
||||
return item_create(sb, key, val, val_len, lock, NULL,
|
||||
SCOUTFS_LOCK_WRITE, false);
|
||||
}
|
||||
|
||||
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len,
|
||||
struct scoutfs_lock *lock)
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *primary)
|
||||
{
|
||||
return item_create(sb, key, val, val_len, lock,
|
||||
return item_create(sb, key, val, val_len, lock, primary,
|
||||
SCOUTFS_LOCK_WRITE_ONLY, true);
|
||||
}
|
||||
|
||||
@@ -1956,7 +1966,7 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
const u64 seq = item_seq(sb, lock);
|
||||
const u64 seq = item_seq(sb, lock, NULL);
|
||||
struct cached_item *item;
|
||||
struct cached_item *found;
|
||||
struct cached_page *pg;
|
||||
@@ -1966,7 +1976,7 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_update);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -2016,12 +2026,16 @@ out:
|
||||
* current items so the caller always writes with write only locks. If
|
||||
* combining the current delta item and the caller's item results in a
|
||||
* null we can just drop it, we don't have to emit a deletion item.
|
||||
*
|
||||
* Delta items don't have to worry about creating items with old
|
||||
* versions under write_only locks. The versions don't impact how we
|
||||
* merge two items.
|
||||
*/
|
||||
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
const u64 seq = item_seq(sb, lock);
|
||||
const u64 seq = item_seq(sb, lock, NULL);
|
||||
struct cached_item *item;
|
||||
struct cached_page *pg;
|
||||
struct rb_node **pnode;
|
||||
@@ -2030,7 +2044,7 @@ int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_delta);
|
||||
|
||||
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
|
||||
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -2090,10 +2104,11 @@ out:
|
||||
* deletion item if there isn't one already cached.
|
||||
*/
|
||||
static int item_delete(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock, int mode, bool force)
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *primary,
|
||||
int mode, bool force)
|
||||
{
|
||||
DECLARE_ITEM_CACHE_INFO(sb, cinf);
|
||||
const u64 seq = item_seq(sb, lock);
|
||||
const u64 seq = item_seq(sb, lock, primary);
|
||||
struct cached_item *item;
|
||||
struct cached_page *pg;
|
||||
struct rb_node **pnode;
|
||||
@@ -2102,7 +2117,8 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
|
||||
|
||||
scoutfs_inc_counter(sb, item_delete);
|
||||
|
||||
if ((ret = lock_safe(lock, key, mode)))
|
||||
if ((ret = lock_safe(sb, lock, key, mode)) ||
|
||||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_forest_set_bloom_bits(sb, lock);
|
||||
@@ -2152,13 +2168,13 @@ out:
|
||||
int scoutfs_item_delete(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock)
|
||||
{
|
||||
return item_delete(sb, key, lock, SCOUTFS_LOCK_WRITE, false);
|
||||
return item_delete(sb, key, lock, NULL, SCOUTFS_LOCK_WRITE, false);
|
||||
}
|
||||
|
||||
int scoutfs_item_delete_force(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock)
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *primary)
|
||||
{
|
||||
return item_delete(sb, key, lock, SCOUTFS_LOCK_WRITE_ONLY, true);
|
||||
return item_delete(sb, key, lock, primary, SCOUTFS_LOCK_WRITE_ONLY, true);
|
||||
}
|
||||
|
||||
u64 scoutfs_item_dirty_pages(struct super_block *sb)
|
||||
@@ -2168,18 +2184,18 @@ u64 scoutfs_item_dirty_pages(struct super_block *sb)
|
||||
return (u64)atomic_read(&cinf->dirty_pages);
|
||||
}
|
||||
|
||||
static int cmp_pg_start(void *priv, struct list_head *A, struct list_head *B)
|
||||
static int cmp_pg_start(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
|
||||
{
|
||||
struct cached_page *a = list_entry(A, struct cached_page, dirty_head);
|
||||
struct cached_page *b = list_entry(B, struct cached_page, dirty_head);
|
||||
KC_LIST_CMP_CONST struct cached_page *a = list_entry(A, KC_LIST_CMP_CONST struct cached_page, dirty_head);
|
||||
KC_LIST_CMP_CONST struct cached_page *b = list_entry(B, KC_LIST_CMP_CONST struct cached_page, dirty_head);
|
||||
|
||||
return scoutfs_key_compare(&a->start, &b->start);
|
||||
}
|
||||
|
||||
static int cmp_item_key(void *priv, struct list_head *A, struct list_head *B)
|
||||
static int cmp_item_key(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
|
||||
{
|
||||
struct cached_item *a = list_entry(A, struct cached_item, dirty_head);
|
||||
struct cached_item *b = list_entry(B, struct cached_item, dirty_head);
|
||||
KC_LIST_CMP_CONST struct cached_item *a = list_entry(A, KC_LIST_CMP_CONST struct cached_item, dirty_head);
|
||||
KC_LIST_CMP_CONST struct cached_item *b = list_entry(B, KC_LIST_CMP_CONST struct cached_item, dirty_head);
|
||||
|
||||
return scoutfs_key_compare(&a->key, &b->key);
|
||||
}
|
||||
@@ -2246,7 +2262,7 @@ int scoutfs_item_write_dirty(struct super_block *sb)
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
list_add(&page->list, &pages);
|
||||
list_add(&page->lru, &pages);
|
||||
|
||||
first = NULL;
|
||||
prev = &first;
|
||||
@@ -2259,7 +2275,7 @@ int scoutfs_item_write_dirty(struct super_block *sb)
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
list_add(&second->list, &pages);
|
||||
list_add(&second->lru, &pages);
|
||||
}
|
||||
|
||||
/* read lock next sorted page, we're only dirty_list user */
|
||||
@@ -2316,8 +2332,8 @@ int scoutfs_item_write_dirty(struct super_block *sb)
|
||||
/* write all the dirty items into log btree blocks */
|
||||
ret = scoutfs_forest_insert_list(sb, first);
|
||||
out:
|
||||
list_for_each_entry_safe(page, second, &pages, list) {
|
||||
list_del_init(&page->list);
|
||||
list_for_each_entry_safe(page, second, &pages, lru) {
|
||||
list_del_init(&page->lru);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
@@ -2328,6 +2344,12 @@ out:
|
||||
* The caller has successfully committed all the dirty btree blocks that
|
||||
* contained the currently dirty items. Clear all the dirty items and
|
||||
* pages.
|
||||
*
|
||||
* This strange lock/trylock loop comes from sparse issuing spurious
|
||||
* mismatched context warnings if we do anything (like unlock and relax)
|
||||
* in the else branch of the failed trylock. We're jumping through
|
||||
* hoops to not use the else but still drop and reacquire the dirty_lock
|
||||
* if the trylock fails.
|
||||
*/
|
||||
int scoutfs_item_write_done(struct super_block *sb)
|
||||
{
|
||||
@@ -2336,40 +2358,34 @@ int scoutfs_item_write_done(struct super_block *sb)
|
||||
struct cached_item *tmp;
|
||||
struct cached_page *pg;
|
||||
|
||||
retry:
|
||||
/* don't let read_pages insert possibly stale items */
|
||||
atomic64_inc(&cinf->read_dirty_barrier);
|
||||
smp_mb__after_atomic();
|
||||
|
||||
spin_lock(&cinf->dirty_lock);
|
||||
|
||||
while ((pg = list_first_entry_or_null(&cinf->dirty_list,
|
||||
struct cached_page,
|
||||
dirty_head))) {
|
||||
|
||||
if (!write_trylock(&pg->rwlock)) {
|
||||
while ((pg = list_first_entry_or_null(&cinf->dirty_list, struct cached_page, dirty_head))) {
|
||||
if (write_trylock(&pg->rwlock)) {
|
||||
spin_unlock(&cinf->dirty_lock);
|
||||
cpu_relax();
|
||||
goto retry;
|
||||
}
|
||||
list_for_each_entry_safe(item, tmp, &pg->dirty_list,
|
||||
dirty_head) {
|
||||
clear_item_dirty(sb, cinf, pg, item);
|
||||
|
||||
if (item->delta)
|
||||
scoutfs_inc_counter(sb, item_delta_written);
|
||||
|
||||
/* free deletion items */
|
||||
if (item->deletion || item->delta)
|
||||
erase_item(pg, item);
|
||||
else
|
||||
item->persistent = 1;
|
||||
}
|
||||
|
||||
write_unlock(&pg->rwlock);
|
||||
spin_lock(&cinf->dirty_lock);
|
||||
}
|
||||
spin_unlock(&cinf->dirty_lock);
|
||||
|
||||
list_for_each_entry_safe(item, tmp, &pg->dirty_list,
|
||||
dirty_head) {
|
||||
clear_item_dirty(sb, cinf, pg, item);
|
||||
|
||||
if (item->delta)
|
||||
scoutfs_inc_counter(sb, item_delta_written);
|
||||
|
||||
/* free deletion items */
|
||||
if (item->deletion || item->delta)
|
||||
erase_item(pg, item);
|
||||
else
|
||||
item->persistent = 1;
|
||||
}
|
||||
|
||||
write_unlock(&pg->rwlock);
|
||||
|
||||
spin_lock(&cinf->dirty_lock);
|
||||
}
|
||||
|
||||
} while (pg);
|
||||
spin_unlock(&cinf->dirty_lock);
|
||||
|
||||
return 0;
|
||||
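The lock/trylock contortion described in the comment above is a general pattern; one way to drain a list without ever unlocking in the else branch of a failed trylock is sketched below. All names are placeholders and the real scoutfs_item_write_done() differs in detail; this only illustrates the shape of the workaround.

#include <linux/spinlock.h>
#include <linux/list.h>

struct example_entry {
	struct list_head head;
	rwlock_t rwlock;
};

struct example_cache {
	spinlock_t list_lock;
	struct list_head entries;
};

static void example_drain(struct example_cache *cache)
{
	struct example_entry *ent;

	do {
		spin_lock(&cache->list_lock);
		while ((ent = list_first_entry_or_null(&cache->entries,
						       struct example_entry, head))) {
			if (!write_trylock(&ent->rwlock))
				break;	/* fall out, drop the spinlock, retry */

			list_del_init(&ent->head);
			write_unlock(&ent->rwlock);
		}
		spin_unlock(&cache->list_lock);

		if (ent)
			cpu_relax();
	} while (ent);
}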
@@ -2499,41 +2515,40 @@ retry:
|
||||
put_pg(sb, right);
|
||||
}
|
||||
|
||||
static unsigned long item_cache_count_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct item_cache_info *cinf = KC_SHRINKER_CONTAINER_OF(shrink, struct item_cache_info);
|
||||
struct super_block *sb = cinf->sb;
|
||||
|
||||
scoutfs_inc_counter(sb, item_cache_count_objects);
|
||||
|
||||
return shrinker_min_long(cinf->lru_pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* Shrink the size the item cache. We're operating against the fast
|
||||
* path lock ordering and we skip pages if we can't acquire locks. We
|
||||
* can run into dirty pages or pages with items that weren't visible to
|
||||
* the earliest active reader which must be skipped.
|
||||
*/
|
||||
static int item_lru_shrink(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
static unsigned long item_cache_scan_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct item_cache_info *cinf = container_of(shrink,
|
||||
struct item_cache_info,
|
||||
shrinker);
|
||||
struct item_cache_info *cinf = KC_SHRINKER_CONTAINER_OF(shrink, struct item_cache_info);
|
||||
struct super_block *sb = cinf->sb;
|
||||
struct cached_page *tmp;
|
||||
struct cached_page *pg;
|
||||
u64 first_reader_seq;
|
||||
int nr;
|
||||
unsigned long freed = 0;
|
||||
int nr = sc->nr_to_scan;
|
||||
|
||||
if (sc->nr_to_scan == 0)
|
||||
goto out;
|
||||
nr = sc->nr_to_scan;
|
||||
|
||||
/* can't invalidate pages with items that weren't visible to first reader */
|
||||
first_reader_seq = first_active_reader_seq(cinf);
|
||||
scoutfs_inc_counter(sb, item_cache_scan_objects);
|
||||
|
||||
write_lock(&cinf->rwlock);
|
||||
spin_lock(&cinf->lru_lock);
|
||||
|
||||
list_for_each_entry_safe(pg, tmp, &cinf->lru_list, lru_head) {
|
||||
|
||||
if (first_reader_seq <= pg->max_seq) {
|
||||
scoutfs_inc_counter(sb, item_shrink_page_reader);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!write_trylock(&pg->rwlock)) {
|
||||
scoutfs_inc_counter(sb, item_shrink_page_trylock);
|
||||
continue;
|
||||
@@ -2551,6 +2566,7 @@ static int item_lru_shrink(struct shrinker *shrink,
|
||||
rbtree_erase(&pg->node, &cinf->pg_root);
|
||||
invalidate_pcpu_page(pg);
|
||||
write_unlock(&pg->rwlock);
|
||||
freed++;
|
||||
|
||||
put_pg(sb, pg);
|
||||
|
||||
@@ -2560,10 +2576,11 @@ static int item_lru_shrink(struct shrinker *shrink,
|
||||
|
||||
write_unlock(&cinf->rwlock);
|
||||
spin_unlock(&cinf->lru_lock);
|
||||
out:
|
||||
return min_t(unsigned long, cinf->lru_pages, INT_MAX);
|
||||
|
||||
return freed;
|
||||
}
|
||||
|
||||
#ifdef KC_CPU_NOTIFIER
|
||||
static int item_cpu_callback(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
@@ -2578,6 +2595,7 @@ static int item_cpu_callback(struct notifier_block *nfb,
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
#endif
|
||||
|
||||
int scoutfs_item_setup(struct super_block *sb)
|
||||
{
|
||||
@@ -2597,8 +2615,7 @@ int scoutfs_item_setup(struct super_block *sb)
|
||||
atomic_set(&cinf->dirty_pages, 0);
|
||||
spin_lock_init(&cinf->lru_lock);
|
||||
INIT_LIST_HEAD(&cinf->lru_list);
|
||||
spin_lock_init(&cinf->active_lock);
|
||||
INIT_LIST_HEAD(&cinf->active_list);
|
||||
atomic64_set(&cinf->read_dirty_barrier, 0);
|
||||
|
||||
cinf->pcpu_pages = alloc_percpu(struct item_percpu_pages);
|
||||
if (!cinf->pcpu_pages)
|
||||
@@ -2607,11 +2624,13 @@ int scoutfs_item_setup(struct super_block *sb)
|
||||
for_each_possible_cpu(cpu)
|
||||
init_pcpu_pages(cinf, cpu);
|
||||
|
||||
cinf->shrinker.shrink = item_lru_shrink;
|
||||
cinf->shrinker.seeks = DEFAULT_SEEKS;
|
||||
register_shrinker(&cinf->shrinker);
|
||||
KC_INIT_SHRINKER_FUNCS(&cinf->shrinker, item_cache_count_objects,
|
||||
item_cache_scan_objects);
|
||||
KC_REGISTER_SHRINKER(&cinf->shrinker, "scoutfs-item:" SCSBF, SCSB_ARGS(sb));
|
||||
#ifdef KC_CPU_NOTIFIER
|
||||
cinf->notifier.notifier_call = item_cpu_callback;
|
||||
register_hotcpu_notifier(&cinf->notifier);
|
||||
#endif
|
||||
|
||||
sbi->item_cache_info = cinf;
|
||||
return 0;
|
||||
@@ -2629,10 +2648,10 @@ void scoutfs_item_destroy(struct super_block *sb)
|
||||
int cpu;
|
||||
|
||||
if (cinf) {
|
||||
BUG_ON(!list_empty(&cinf->active_list));
|
||||
|
||||
#ifdef KC_CPU_NOTIFIER
|
||||
unregister_hotcpu_notifier(&cinf->notifier);
|
||||
unregister_shrinker(&cinf->shrinker);
|
||||
#endif
|
||||
KC_UNREGISTER_SHRINKER(&cinf->shrinker);
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
drop_pcpu_pages(sb, cinf, cpu);
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
|
||||
int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len,
|
||||
struct scoutfs_lock *lock);
|
||||
@@ -15,16 +17,15 @@ int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len,
|
||||
struct scoutfs_lock *lock);
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *primary);
|
||||
int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
|
||||
void *val, int val_len, struct scoutfs_lock *lock);
|
||||
int scoutfs_item_delete(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_item_delete_force(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_item_delete_force(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *primary);
|
||||
|
||||
u64 scoutfs_item_dirty_pages(struct super_block *sb);
|
||||
int scoutfs_item_write_dirty(struct super_block *sb);
|
||||
|
||||
149
kmod/src/kernelcompat.c
Normal file
@@ -0,0 +1,149 @@
|
||||
|
||||
#include <linux/uio.h>
|
||||
|
||||
#include "kernelcompat.h"
|
||||
|
||||
#ifdef KC_SHRINKER_SHRINK
|
||||
#include <linux/shrinker.h>
|
||||
/*
|
||||
* If the target kernel doesn't have the .{count,scan}_objects() shrinker
* interface then we provide a .shrink() helper that performs the shrink
* work in terms of count/scan.
|
||||
*/
|
||||
int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kc_shrinker_wrapper *wrapper = container_of(shrink, struct kc_shrinker_wrapper, shrink);
	unsigned long nr;
	unsigned long rc;

	if (sc->nr_to_scan != 0) {
		rc = wrapper->scan_objects(shrink, sc);
		/* translate magic values to the equivalent for older kernels */
		if (rc == SHRINK_STOP)
			return -1;
		else if (rc == SHRINK_EMPTY)
			return 0;
	}

	nr = wrapper->count_objects(shrink, sc);

	return min_t(unsigned long, nr, INT_MAX);
}
#endif
|
||||
|
||||
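On kernels with the modern interface the wrapper above is bypassed and the two callbacks are called directly, so targets always supply a pair shaped like the following. This is a hypothetical minimal pair, not one of the shrinkers modified in this patch.

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>

static atomic_long_t example_nr_cached = ATOMIC_LONG_INIT(0);

/* cheap, non-blocking estimate of how many objects could be freed */
static unsigned long example_count_objects(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	return atomic_long_read(&example_nr_cached);
}

/* try to free up to sc->nr_to_scan objects and report how many went away */
static unsigned long example_scan_objects(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	unsigned long freed = min_t(unsigned long, sc->nr_to_scan,
				    atomic_long_read(&example_nr_cached));

	if (freed == 0)
		return SHRINK_STOP;	/* nothing reclaimable right now */

	atomic_long_sub(freed, &example_nr_cached);
	return freed;
}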
#ifndef KC_CURRENT_TIME_INODE
|
||||
struct timespec64 kc_current_time(struct inode *inode)
|
||||
{
|
||||
struct timespec64 now;
|
||||
unsigned gran;
|
||||
|
||||
getnstimeofday64(&now);
|
||||
|
||||
if (unlikely(!inode->i_sb)) {
|
||||
WARN(1, "current_time() called with uninitialized super_block in the inode");
|
||||
return now;
|
||||
}
|
||||
|
||||
gran = inode->i_sb->s_time_gran;
|
||||
|
||||
/* Avoid division in the common cases 1 ns and 1 s. */
|
||||
if (gran == 1) {
|
||||
/* nothing */
|
||||
} else if (gran == NSEC_PER_SEC) {
|
||||
now.tv_nsec = 0;
|
||||
} else if (gran > 1 && gran < NSEC_PER_SEC) {
|
||||
now.tv_nsec -= now.tv_nsec % gran;
|
||||
} else {
|
||||
WARN(1, "illegal file time granularity: %u", gran);
|
||||
}
|
||||
|
||||
return now;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
|
||||
ssize_t
|
||||
kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos, loff_t *ppos,
|
||||
size_t count, ssize_t written)
|
||||
{
|
||||
ssize_t status;
|
||||
struct iov_iter i;
|
||||
|
||||
iov_iter_init(&i, WRITE, iov, nr_segs, count);
|
||||
status = kc_generic_perform_write(iocb, &i, pos);
|
||||
|
||||
if (likely(status >= 0)) {
|
||||
written += status;
|
||||
*ppos = pos + status;
|
||||
}
|
||||
|
||||
return written ? written : status;
|
||||
}
|
||||
#endif
|
||||
|
||||
#include <linux/list_lru.h>
|
||||
|
||||
#ifdef KC_LIST_LRU_WALK_CB_ITEM_LOCK
|
||||
static enum lru_status kc_isolate(struct list_head *item, spinlock_t *lock, void *cb_arg)
|
||||
{
|
||||
struct kc_isolate_args *args = cb_arg;
|
||||
|
||||
/* isolate doesn't use list, nr_items updated in caller */
|
||||
return args->isolate(item, NULL, args->cb_arg);
|
||||
}
|
||||
|
||||
unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
|
||||
unsigned long nr_to_walk)
|
||||
{
|
||||
struct kc_isolate_args args = {
|
||||
.isolate = isolate,
|
||||
.cb_arg = cb_arg,
|
||||
};
|
||||
|
||||
return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
|
||||
}
|
||||
|
||||
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
|
||||
kc_list_lru_walk_cb_t isolate, void *cb_arg)
|
||||
{
|
||||
struct kc_isolate_args args = {
|
||||
.isolate = isolate,
|
||||
.cb_arg = cb_arg,
|
||||
};
|
||||
|
||||
return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_LIST_LRU_WALK_CB_LIST_LOCK
|
||||
static enum lru_status kc_isolate(struct list_head *item, struct list_lru_one *list,
|
||||
spinlock_t *lock, void *cb_arg)
|
||||
{
|
||||
struct kc_isolate_args *args = cb_arg;
|
||||
|
||||
return args->isolate(item, list, args->cb_arg);
|
||||
}
|
||||
|
||||
unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
|
||||
unsigned long nr_to_walk)
|
||||
{
|
||||
struct kc_isolate_args args = {
|
||||
.isolate = isolate,
|
||||
.cb_arg = cb_arg,
|
||||
};
|
||||
|
||||
return list_lru_walk(lru, kc_isolate, &args, nr_to_walk);
|
||||
}
|
||||
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
|
||||
kc_list_lru_walk_cb_t isolate, void *cb_arg)
|
||||
{
|
||||
struct kc_isolate_args args = {
|
||||
.isolate = isolate,
|
||||
.cb_arg = cb_arg,
|
||||
};
|
||||
|
||||
return list_lru_shrink_walk(lru, sc, kc_isolate, &args);
|
||||
}
|
||||
|
||||
#endif
|
||||
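Whichever variant gets compiled, callers pass an isolate callback with the three-argument kc_list_lru_walk_cb_t shape and the wrappers supply or drop the lock argument as needed. A hypothetical isolate function (not from this patch) might look like:

#include <linux/kernel.h>
#include <linux/list_lru.h>

struct example_obj {
	struct list_head lru_head;
	bool busy;
};

static enum lru_status example_isolate(struct list_head *item, struct list_lru_one *list,
				       void *cb_arg)
{
	struct example_obj *obj = container_of(item, struct example_obj, lru_head);
	struct list_head *dispose = cb_arg;

	if (obj->busy)
		return LRU_SKIP;

	/* move to the caller's private list; freeing happens after the walk */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

It would be driven with kc_list_lru_shrink_walk(lru, sc, example_isolate, &dispose) from a scan_objects callback.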
@@ -1,48 +1,459 @@
|
||||
#ifndef _SCOUTFS_KERNELCOMPAT_H_
|
||||
#define _SCOUTFS_KERNELCOMPAT_H_
|
||||
|
||||
#ifndef KC_ITERATE_DIR_CONTEXT
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
typedef filldir_t kc_readdir_ctx_t;
|
||||
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, dirent, ctx)
|
||||
#define KC_FOP_READDIR readdir
|
||||
#define kc_readdir_pos(filp, ctx) (filp)->f_pos
|
||||
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, dirent, ctx)
|
||||
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
|
||||
(ctx(dirent, name, name_len, pos, ino, dt) == 0)
|
||||
|
||||
/*
|
||||
* v4.15-rc3-4-gae5e165d855d
|
||||
*
|
||||
* new API for handling inode->i_version.  We include the header here
* for convenience instead of in each file that needs it.
|
||||
*/
|
||||
#ifdef KC_NEED_LINUX_IVERSION_H
|
||||
#include <linux/iversion.h>
|
||||
#else
|
||||
typedef struct dir_context * kc_readdir_ctx_t;
|
||||
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, ctx)
|
||||
#define KC_FOP_READDIR iterate
|
||||
#define kc_readdir_pos(filp, ctx) (ctx)->pos
|
||||
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, ctx)
|
||||
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
|
||||
dir_emit(ctx, name, name_len, ino, dt)
|
||||
/*
|
||||
* Kernels before the version above fall back to manipulating
* inode->i_version directly, as before, with these degraded methods.
|
||||
*/
|
||||
#define inode_set_iversion_queried(inode, val) \
|
||||
do { \
|
||||
(inode)->i_version = val; \
|
||||
} while (0)
|
||||
#define inode_peek_iversion(inode) \
|
||||
({ \
|
||||
(inode)->i_version; \
|
||||
})
|
||||
#endif
|
||||
|
||||
#ifdef KC_POSIX_ACL_VALID_USER_NS
|
||||
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(user_ns, acl)
|
||||
#else
|
||||
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(acl)
|
||||
#endif
|
||||
|
||||
#ifndef KC_DIR_EMIT_DOTS
|
||||
/*
|
||||
* Kernels before ->iterate and don't have dir_emit_dots so we give them
|
||||
* one that works with the ->readdir() filldir() method.
|
||||
* v3.6-rc1-24-gdbf2576e37da
|
||||
*
|
||||
* All workqueues are now non-reentrant, and the bit flag is removed
|
||||
* shortly after its uses were removed.
|
||||
*/
|
||||
static inline int dir_emit_dots(struct file *file, void *dirent,
|
||||
filldir_t filldir)
|
||||
#ifndef WQ_NON_REENTRANT
|
||||
#define WQ_NON_REENTRANT 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v3.18-rc2-19-gb5ae6b15bd73
|
||||
*
|
||||
* Folds d_materialise_unique into d_splice_alias. Note reversal
|
||||
* of arguments (Also note Documentation/filesystems/porting.rst)
|
||||
*/
|
||||
#ifndef KC_D_MATERIALISE_UNIQUE
|
||||
#define d_materialise_unique(dentry, inode) d_splice_alias(inode, dentry)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v4.8-rc1-29-g31051c85b5e2
|
||||
*
|
||||
* fall back to inode_change_ok() if setattr_prepare() isn't available
|
||||
*/
|
||||
#ifndef KC_SETATTR_PREPARE
|
||||
#define setattr_prepare(dentry, attr) inode_change_ok(d_inode(dentry), attr)
|
||||
#endif
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
#define __posix_acl_create posix_acl_create
|
||||
#define __posix_acl_chmod posix_acl_chmod
|
||||
#endif
|
||||
|
||||
#ifndef KC_PERCPU_COUNTER_ADD_BATCH
|
||||
#define percpu_counter_add_batch __percpu_counter_add
|
||||
#endif
|
||||
|
||||
#ifndef KC_MEMALLOC_NOFS_SAVE
|
||||
#define memalloc_nofs_save memalloc_noio_save
|
||||
#define memalloc_nofs_restore memalloc_noio_restore
|
||||
#endif
|
||||
|
||||
#ifdef KC_BIO_BI_OPF
|
||||
#define kc_bio_get_opf(bio) \
|
||||
({ \
|
||||
(bio)->bi_opf; \
|
||||
})
|
||||
#define kc_bio_set_opf(bio, opf) \
|
||||
do { \
|
||||
(bio)->bi_opf = opf; \
|
||||
} while (0)
|
||||
#define kc_bio_set_sector(bio, sect) \
|
||||
do { \
|
||||
(bio)->bi_iter.bi_sector = sect;\
|
||||
} while (0)
|
||||
#define kc_submit_bio(bio) submit_bio(bio)
|
||||
#else
|
||||
#define kc_bio_get_opf(bio) \
|
||||
({ \
|
||||
(bio)->bi_rw; \
|
||||
})
|
||||
#define kc_bio_set_opf(bio, opf) \
|
||||
do { \
|
||||
(bio)->bi_rw = opf; \
|
||||
} while (0)
|
||||
#define kc_bio_set_sector(bio, sect) \
|
||||
do { \
|
||||
(bio)->bi_sector = sect; \
|
||||
} while (0)
|
||||
#define kc_submit_bio(bio) \
|
||||
do { \
|
||||
submit_bio((bio)->bi_rw, bio); \
|
||||
} while (0)
|
||||
#define bio_set_dev(bio, bdev) \
|
||||
do { \
|
||||
(bio)->bi_bdev = (bdev); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifdef KC_BIO_BI_STATUS
|
||||
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio)
|
||||
#define kc_bio_get_errno(bio) ({ blk_status_to_errno((bio)->bi_status); })
|
||||
#else
|
||||
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio, int _error_arg)
|
||||
#define kc_bio_get_errno(bio) ({ (int)((void)(bio), _error_arg); })
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v4.13-rc1-6-ge462ec50cb5f
|
||||
*
|
||||
* MS_* (mount) flags from <linux/mount.h> should not be used in the kernel
|
||||
* anymore from 4.x onwards. Instead, we need to use the SB_* (superblock) flags
|
||||
*/
|
||||
#ifndef SB_POSIXACL
|
||||
#define SB_POSIXACL MS_POSIXACL
|
||||
#define SB_I_VERSION MS_I_VERSION
|
||||
#endif
|
||||
|
||||
#ifndef KC_CURRENT_TIME_INODE
|
||||
struct timespec64 kc_current_time(struct inode *inode);
|
||||
#define current_time kc_current_time
|
||||
#define kc_timespec timespec
|
||||
#else
|
||||
#define kc_timespec timespec64
|
||||
#endif
|
||||
|
||||
#ifndef KC_SHRINKER_SHRINK
|
||||
|
||||
#define KC_DEFINE_SHRINKER(name) struct shrinker name
|
||||
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do { \
|
||||
__typeof__(name) _shrink = (name); \
|
||||
_shrink->count_objects = (countfn); \
|
||||
_shrink->scan_objects = (scanfn); \
|
||||
_shrink->seeks = DEFAULT_SEEKS; \
|
||||
} while (0)
|
||||
|
||||
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(ptr, type, shrinker)
|
||||
#ifdef KC_SHRINKER_NAME
|
||||
#define KC_REGISTER_SHRINKER register_shrinker
|
||||
#else
|
||||
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr))
|
||||
#endif /* KC_SHRINKER_NAME */
|
||||
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr))
|
||||
#define KC_SHRINKER_FN(ptr) (ptr)
|
||||
#else
|
||||
|
||||
#include <linux/shrinker.h>
|
||||
#ifndef SHRINK_STOP
|
||||
#define SHRINK_STOP (~0UL)
|
||||
#define SHRINK_EMPTY (~0UL - 1)
|
||||
#endif
|
||||
|
||||
int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc);
|
||||
struct kc_shrinker_wrapper {
|
||||
unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc);
|
||||
unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc);
|
||||
struct shrinker shrink;
|
||||
};
|
||||
|
||||
#define KC_DEFINE_SHRINKER(name) struct kc_shrinker_wrapper name;
|
||||
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do { \
|
||||
struct kc_shrinker_wrapper *_wrap = (name); \
|
||||
_wrap->count_objects = (countfn); \
|
||||
_wrap->scan_objects = (scanfn); \
|
||||
_wrap->shrink.shrink = kc_shrink_wrapper_fn; \
|
||||
_wrap->shrink.seeks = DEFAULT_SEEKS; \
|
||||
} while (0)
|
||||
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(container_of(ptr, struct kc_shrinker_wrapper, shrink), type, shrinker)
|
||||
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr.shrink))
|
||||
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr.shrink))
|
||||
#define KC_SHRINKER_FN(ptr) (ptr.shrink)
|
||||
|
||||
#endif /* KC_SHRINKER_SHRINK */
|
||||
|
||||
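Taken together, the macros above let a subsystem declare, register, and tear down its shrinker identically on old and new kernels; the item cache and lock changes elsewhere in this diff follow this shape. A condensed sketch with placeholder names and no locking (SCSBF/SCSB_ARGS are the scoutfs name-format helpers, and the name string only matters on kernels with named shrinkers):

struct example_info {
	struct super_block *sb;
	unsigned long nr_cached;
	KC_DEFINE_SHRINKER(shrinker);
};

static unsigned long example_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/* the container_of wrapper hides the extra indirection on old kernels */
	struct example_info *inf = KC_SHRINKER_CONTAINER_OF(shrink, struct example_info);

	return inf->nr_cached;
}

static unsigned long example_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct example_info *inf = KC_SHRINKER_CONTAINER_OF(shrink, struct example_info);
	unsigned long freed = min(sc->nr_to_scan, inf->nr_cached);

	inf->nr_cached -= freed;
	return freed;
}

static int example_setup(struct super_block *sb, struct example_info *inf)
{
	inf->sb = sb;
	KC_INIT_SHRINKER_FUNCS(&inf->shrinker, example_count, example_scan);
	KC_REGISTER_SHRINKER(&inf->shrinker, "example:" SCSBF, SCSB_ARGS(sb));
	return 0;
}

static void example_teardown(struct example_info *inf)
{
	KC_UNREGISTER_SHRINKER(&inf->shrinker);
}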
#ifdef KC_KERNEL_GETSOCKNAME_ADDRLEN
|
||||
#include <linux/net.h>
|
||||
#include <linux/inet.h>
|
||||
static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *addr)
|
||||
{
|
||||
[removed in this hunk: the body of the old dir_emit_dots() fallback for the ->readdir()/filldir() path]
	int addrlen = sizeof(struct sockaddr_in);
	int ret = kernel_getsockname(sock, addr, &addrlen);
	if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
		return -EAFNOSUPPORT;
	else if (ret < 0)
		return ret;

	return sizeof(struct sockaddr_in);
}
static inline int kc_kernel_getpeername(struct socket *sock, struct sockaddr *addr)
{
	int addrlen = sizeof(struct sockaddr_in);
	int ret = kernel_getpeername(sock, addr, &addrlen);
	if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
		return -EAFNOSUPPORT;
	else if (ret < 0)
		return ret;

	return sizeof(struct sockaddr_in);
|
||||
}
|
||||
#else
|
||||
#define kc_kernel_getsockname(sock, addr) kernel_getsockname(sock, addr)
|
||||
#define kc_kernel_getpeername(sock, addr) kernel_getpeername(sock, addr)
|
||||
#endif
|
||||
|
||||
#ifdef KC_SOCK_CREATE_KERN_NET
|
||||
#define kc_sock_create_kern(family, type, proto, res) sock_create_kern(&init_net, family, type, proto, res)
|
||||
#else
|
||||
#define kc_sock_create_kern sock_create_kern
|
||||
#endif
|
||||
|
||||
#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
|
||||
ssize_t kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos, loff_t *ppos,
|
||||
size_t count, ssize_t written);
|
||||
#define generic_file_buffered_write kc_generic_file_buffered_write
|
||||
#ifdef KC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
|
||||
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
|
||||
{
|
||||
iocb->ki_pos = pos;
|
||||
return generic_perform_write(iocb, iter);
|
||||
}
|
||||
#else
|
||||
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
return generic_perform_write(file, iter, pos);
|
||||
}
|
||||
#endif
|
||||
#endif // KC_GENERIC_FILE_BUFFERED_WRITE
|
||||
|
||||
#ifndef KC_HAVE_BLK_OPF_T
|
||||
/* typedef __u32 __bitwise blk_opf_t; */
|
||||
typedef unsigned int blk_opf_t;
|
||||
#endif
|
||||
|
||||
#ifdef KC_LIST_CMP_CONST_ARG_LIST_HEAD
|
||||
#define KC_LIST_CMP_CONST const
|
||||
#else
|
||||
#define KC_LIST_CMP_CONST
|
||||
#endif
|
||||
|
||||
#ifdef KC_VMALLOC_PGPROT_T
|
||||
#define kc__vmalloc(size, gfp_mask) __vmalloc(size, gfp_mask, PAGE_KERNEL)
|
||||
#else
|
||||
#define kc__vmalloc __vmalloc
|
||||
#endif
|
||||
|
||||
#ifdef KC_VFS_METHOD_USER_NAMESPACE_ARG
|
||||
#define KC_VFS_NS_DEF struct user_namespace *mnt_user_ns,
|
||||
#define KC_VFS_NS mnt_user_ns,
|
||||
#define KC_VFS_INIT_NS &init_user_ns,
|
||||
#else
|
||||
#define KC_VFS_NS_DEF
|
||||
#define KC_VFS_NS
|
||||
#define KC_VFS_INIT_NS
|
||||
#endif
|
||||
|
||||
#ifdef KC_BIO_ALLOC_DEV_OPF_ARGS
|
||||
#define kc_bio_alloc bio_alloc
|
||||
#else
|
||||
#include <linux/bio.h>
|
||||
static inline struct bio *kc_bio_alloc(struct block_device *bdev, unsigned short nr_vecs,
|
||||
blk_opf_t opf, gfp_t gfp_mask)
|
||||
{
|
||||
struct bio *b = bio_alloc(gfp_mask, nr_vecs);
|
||||
if (b) {
|
||||
kc_bio_set_opf(b, opf);
|
||||
bio_set_dev(b, bdev);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
#endif
|
||||
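These bio helpers are meant to be used together; below is a hedged sketch of a synchronous one-page read built on them. Everything named example_* is invented, error handling is minimal, and REQ_OP_READ is assumed to be available to the build (older kernels reach the bi_rw path through the macros above).

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void KC_DECLARE_BIO_END_IO(example_end_io, struct bio *bio)
{
	int err = kc_bio_get_errno(bio);

	if (err)
		pr_err("example read failed: %d\n", err);

	complete(bio->bi_private);
	bio_put(bio);
}

static int example_read_page(struct block_device *bdev, sector_t sector, struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;

	bio = kc_bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOFS);
	if (!bio)
		return -ENOMEM;

	kc_bio_set_sector(bio, sector);
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_end_io;
	bio->bi_private = &done;

	kc_submit_bio(bio);
	wait_for_completion(&done);
	return 0;
}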
|
||||
#ifndef KC_FIEMAP_PREP
|
||||
#define fiemap_prep(inode, fieinfo, start, len, flags) fiemap_check_flags(fieinfo, flags)
|
||||
#endif
|
||||
|
||||
#ifndef KC_KERNEL_OLD_TIMEVAL_STRUCT
|
||||
#define __kernel_old_timeval timeval
|
||||
#define ns_to_kernel_old_timeval(ktime) ns_to_timeval(ktime.tv64)
|
||||
#endif
|
||||
|
||||
#ifdef KC_SOCK_SET_SNDTIMEO
|
||||
#include <net/sock.h>
|
||||
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
|
||||
{
|
||||
sock_set_sndtimeo(sock->sk, secs);
|
||||
return 0;
|
||||
}
|
||||
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
|
||||
{
|
||||
struct __kernel_old_timeval tv;
|
||||
sockptr_t kopt;
|
||||
|
||||
tv = ns_to_kernel_old_timeval(to);
|
||||
|
||||
kopt = KERNEL_SOCKPTR(&tv);
|
||||
|
||||
return sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO_NEW,
|
||||
kopt, sizeof(tv));
|
||||
}
|
||||
#else
|
||||
#include <net/sock.h>
|
||||
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
|
||||
{
|
||||
struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };
|
||||
return kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
|
||||
{
|
||||
struct __kernel_old_timeval tv;
|
||||
|
||||
tv = ns_to_kernel_old_timeval(to);
|
||||
return kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_SETSOCKOPT_SOCKPTR_T
|
||||
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
|
||||
{
|
||||
sockptr_t kopt = KERNEL_SOCKPTR(optval);
|
||||
return sock_setsockopt(sock, level, op, kopt, sizeof(optval));
|
||||
}
|
||||
#else
|
||||
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
|
||||
{
|
||||
return kernel_setsockopt(sock, level, op, (char *)optval, sizeof(optval));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_HAVE_TCP_SET_SOCKFN
|
||||
#include <linux/net.h>
|
||||
#include <net/tcp.h>
|
||||
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
|
||||
{
|
||||
return tcp_sock_set_keepintvl(sock->sk, val);
|
||||
}
|
||||
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
|
||||
{
|
||||
return tcp_sock_set_keepidle(sock->sk, val);
|
||||
}
|
||||
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
|
||||
{
|
||||
tcp_sock_set_user_timeout(sock->sk, val);
|
||||
return 0;
|
||||
}
|
||||
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
|
||||
{
|
||||
tcp_sock_set_nodelay(sock->sk);
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#include <linux/net.h>
|
||||
#include <net/tcp.h>
|
||||
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
|
||||
{
|
||||
int optval = 1;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_INODE_DIO_END
|
||||
#define kc_inode_dio_end inode_dio_end
|
||||
#else
|
||||
#define kc_inode_dio_end inode_dio_done
|
||||
#endif
|
||||
|
||||
#ifndef KC_MM_VM_FAULT_T
|
||||
typedef unsigned int vm_fault_t;
|
||||
static inline vm_fault_t vmf_error(int err)
|
||||
{
|
||||
if (err == -ENOMEM)
|
||||
return VM_FAULT_OOM;
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
#endif
|
||||
|
||||
#include <linux/list_lru.h>
|
||||
|
||||
#ifndef KC_LIST_LRU_SHRINK_COUNT_WALK
|
||||
/* we don't bother with sc->{nid,memcg} (which doesn't exist in oldest kernels) */
|
||||
static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
return list_lru_count(lru);
|
||||
}
|
||||
static inline unsigned long
|
||||
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
|
||||
list_lru_walk_cb isolate, void *cb_arg)
|
||||
{
|
||||
return list_lru_walk(lru, isolate, cb_arg, sc->nr_to_scan);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef KC_LIST_LRU_ADD_OBJ
|
||||
#define list_lru_add_obj list_lru_add
|
||||
#define list_lru_del_obj list_lru_del
|
||||
#endif
|
||||
|
||||
#if defined(KC_LIST_LRU_WALK_CB_LIST_LOCK) || defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
|
||||
struct list_lru_one;
|
||||
typedef enum lru_status (*kc_list_lru_walk_cb_t)(struct list_head *item, struct list_lru_one *list,
|
||||
void *cb_arg);
|
||||
struct kc_isolate_args {
|
||||
kc_list_lru_walk_cb_t isolate;
|
||||
void *cb_arg;
|
||||
};
|
||||
unsigned long kc_list_lru_walk(struct list_lru *lru, kc_list_lru_walk_cb_t isolate, void *cb_arg,
|
||||
unsigned long nr_to_walk);
|
||||
unsigned long kc_list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
|
||||
kc_list_lru_walk_cb_t isolate, void *cb_arg);
|
||||
#else
|
||||
#define kc_list_lru_shrink_walk list_lru_shrink_walk
|
||||
#endif
|
||||
|
||||
#if defined(KC_LIST_LRU_WALK_CB_ITEM_LOCK)
|
||||
/* isolate moved by hand, nr_items updated in walk as _REMOVE returned */
|
||||
static inline void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
|
||||
struct list_head *head)
|
||||
{
|
||||
list_move(item, head);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -125,8 +125,8 @@ static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
|
||||
* other alternatives across keys that first differ in any of the
|
||||
* values. Say maybe 20% faster than memcmp.
|
||||
*/
|
||||
static inline int scoutfs_key_compare(struct scoutfs_key *a,
|
||||
struct scoutfs_key *b)
|
||||
static inline int scoutfs_key_compare(const struct scoutfs_key *a,
|
||||
const struct scoutfs_key *b)
|
||||
{
|
||||
return scoutfs_cmp(a->sk_zone, b->sk_zone) ?:
|
||||
scoutfs_cmp(le64_to_cpu(a->_sk_first), le64_to_cpu(b->_sk_first)) ?:
|
||||
@@ -142,10 +142,10 @@ static inline int scoutfs_key_compare(struct scoutfs_key *a,
|
||||
* 1: a_start > b_end
|
||||
* else 0: ranges overlap
|
||||
*/
|
||||
static inline int scoutfs_key_compare_ranges(struct scoutfs_key *a_start,
|
||||
struct scoutfs_key *a_end,
|
||||
struct scoutfs_key *b_start,
|
||||
struct scoutfs_key *b_end)
|
||||
static inline int scoutfs_key_compare_ranges(const struct scoutfs_key *a_start,
|
||||
const struct scoutfs_key *a_end,
|
||||
const struct scoutfs_key *b_start,
|
||||
const struct scoutfs_key *b_end)
|
||||
{
|
||||
return scoutfs_key_compare(a_end, b_start) < 0 ? -1 :
|
||||
scoutfs_key_compare(a_start, b_end) > 0 ? 1 :
|
||||
|
||||
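One convenient consequence of the range comparison above: checking whether a single key is covered by a lock's [start, end] range is just a degenerate overlap test, which is how scoutfs_lock_protected() uses it later in this diff. A small sketch, assuming the surrounding scoutfs_key declarations:

static inline bool example_key_in_range(const struct scoutfs_key *key,
					const struct scoutfs_key *start,
					const struct scoutfs_key *end)
{
	/* 0 from the range comparison means the two ranges overlap */
	return scoutfs_key_compare_ranges(key, key, start, end) == 0;
}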
203
kmod/src/lock.c
@@ -12,12 +12,12 @@
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/preempt_mask.h> /* a rhel shed.h needed preempt_offset? */
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/posix_acl.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "lock.h"
|
||||
@@ -35,6 +35,9 @@
|
||||
#include "xattr.h"
|
||||
#include "item.h"
|
||||
#include "omap.h"
|
||||
#include "util.h"
|
||||
#include "totl.h"
|
||||
#include "quota.h"
|
||||
|
||||
/*
|
||||
* scoutfs uses a lock service to manage item cache consistency between
|
||||
@@ -76,7 +79,7 @@ struct lock_info {
|
||||
bool unmounting;
|
||||
struct rb_root lock_tree;
|
||||
struct rb_root lock_range_tree;
|
||||
struct shrinker shrinker;
|
||||
KC_DEFINE_SHRINKER(shrinker);
|
||||
struct list_head lru_list;
|
||||
unsigned long long lru_nr;
|
||||
struct workqueue_struct *workq;
|
||||
@@ -129,20 +132,17 @@ static bool lock_modes_match(int granted, int requested)
|
||||
* allows deletions to be performed by unlink without having to wait for
|
||||
* remote cached inodes to be dropped.
|
||||
*
|
||||
* If the cached inode was already deferring final inode deletion then
|
||||
* we can't perform that inline in invalidation. The locking alone
|
||||
* deadlock, and it might also take multiple transactions to fully
|
||||
* delete an inode with significant metadata. We only perform the iput
|
||||
* inline if we know that possible eviction can't perform the final
|
||||
* deletion, otherwise we kick it off to async work.
|
||||
* We kick the d_prune and iput off to async work because they can end
|
||||
* up in final iput and inode eviction item deletion which would
|
||||
* deadlock. d_prune->dput can end up in iput on parents in different
|
||||
* locks entirely.
|
||||
*/
|
||||
static void invalidate_inode(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_inode_info *si;
|
||||
struct inode *inode;
|
||||
|
||||
inode = scoutfs_ilookup(sb, ino);
|
||||
inode = scoutfs_ilookup_nowait_nonewfree(sb, ino);
|
||||
if (inode) {
|
||||
si = SCOUTFS_I(inode);
|
||||
|
||||
@@ -152,17 +152,9 @@ static void invalidate_inode(struct super_block *sb, u64 ino)
|
||||
scoutfs_data_wait_changed(inode);
|
||||
}
|
||||
|
||||
/* can't touch during unmount, dcache destroys w/o locks */
|
||||
if (!linfo->unmounting)
|
||||
d_prune_aliases(inode);
|
||||
forget_all_cached_acls(inode);
|
||||
|
||||
si->drop_invalidated = true;
|
||||
if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
|
||||
iput(inode);
|
||||
} else {
|
||||
/* defer iput to work context so we don't evict inodes from invalidation */
|
||||
scoutfs_inode_queue_iput(inode);
|
||||
}
|
||||
scoutfs_inode_queue_iput(inode, SI_IPUT_FLAG_PRUNE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,7 +168,6 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
enum scoutfs_lock_mode prev, enum scoutfs_lock_mode mode)
|
||||
{
|
||||
struct scoutfs_lock_coverage *cov;
|
||||
struct scoutfs_lock_coverage *tmp;
|
||||
u64 ino, last;
|
||||
int ret = 0;
|
||||
|
||||
@@ -195,10 +186,31 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (lock->start.sk_zone == SCOUTFS_QUOTA_ZONE && !lock_mode_can_read(mode))
|
||||
scoutfs_quota_invalidate(sb);
|
||||
|
||||
/* have to invalidate if we're not in the only usable case */
|
||||
if (!(prev == SCOUTFS_LOCK_WRITE && mode == SCOUTFS_LOCK_READ)) {
|
||||
retry:
|
||||
/* invalidate inodes before removing coverage */
|
||||
/*
|
||||
* Remove cov items to tell users that their cache is
|
||||
* stale. The unlock pattern comes from avoiding bad
|
||||
* sparse warnings when taking else in a failed trylock.
|
||||
*/
|
||||
spin_lock(&lock->cov_list_lock);
|
||||
while ((cov = list_first_entry_or_null(&lock->cov_list,
|
||||
struct scoutfs_lock_coverage, head))) {
|
||||
if (spin_trylock(&cov->cov_lock)) {
|
||||
list_del_init(&cov->head);
|
||||
cov->lock = NULL;
|
||||
spin_unlock(&cov->cov_lock);
|
||||
scoutfs_inc_counter(sb, lock_invalidate_coverage);
|
||||
}
|
||||
spin_unlock(&lock->cov_list_lock);
|
||||
spin_lock(&lock->cov_list_lock);
|
||||
}
|
||||
spin_unlock(&lock->cov_list_lock);
|
||||
|
||||
/* invalidate inodes after removing coverage so drop/evict aren't covered */
|
||||
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
|
||||
ino = le64_to_cpu(lock->start.ski_ino);
|
||||
last = le64_to_cpu(lock->end.ski_ino);
|
||||
@@ -208,21 +220,6 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
/* remove cov items to tell users that their cache is stale */
|
||||
spin_lock(&lock->cov_list_lock);
|
||||
list_for_each_entry_safe(cov, tmp, &lock->cov_list, head) {
|
||||
if (!spin_trylock(&cov->cov_lock)) {
|
||||
spin_unlock(&lock->cov_list_lock);
|
||||
cpu_relax();
|
||||
goto retry;
|
||||
}
|
||||
list_del_init(&cov->head);
|
||||
cov->lock = NULL;
|
||||
spin_unlock(&cov->cov_lock);
|
||||
scoutfs_inc_counter(sb, lock_invalidate_coverage);
|
||||
}
|
||||
spin_unlock(&lock->cov_list_lock);
|
||||
|
||||
scoutfs_item_invalidate(sb, &lock->start, &lock->end);
|
||||
}
|
||||
|
||||
@@ -255,7 +252,7 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
|
||||
BUG_ON(!list_empty(&lock->shrink_head));
|
||||
BUG_ON(!list_empty(&lock->cov_list));
|
||||
|
||||
scoutfs_omap_free_lock_data(lock->omap_data);
|
||||
kfree(lock->inode_deletion_data);
|
||||
kfree(lock);
|
||||
}
|
||||
|
||||
@@ -289,9 +286,9 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
|
||||
lock->sb = sb;
|
||||
init_waitqueue_head(&lock->waitq);
|
||||
lock->mode = SCOUTFS_LOCK_NULL;
|
||||
lock->invalidating_mode = SCOUTFS_LOCK_NULL;
|
||||
|
||||
atomic64_set(&lock->forest_bloom_nr, 0);
|
||||
spin_lock_init(&lock->omap_spinlock);
|
||||
|
||||
trace_scoutfs_lock_alloc(sb, lock);
|
||||
|
||||
@@ -307,6 +304,7 @@ static void lock_inc_count(unsigned int *counts, enum scoutfs_lock_mode mode)
|
||||
static void lock_dec_count(unsigned int *counts, enum scoutfs_lock_mode mode)
|
||||
{
|
||||
BUG_ON(mode < 0 || mode >= SCOUTFS_LOCK_NR_MODES);
|
||||
BUG_ON(counts[mode] == 0);
|
||||
counts[mode]--;
|
||||
}
|
||||
|
||||
@@ -667,7 +665,9 @@ struct inv_req {
|
||||
*
|
||||
* Before we start invalidating the lock we set the lock to the new
|
||||
* mode, preventing further incompatible users of the old mode from
|
||||
* using the lock while we're invalidating.
|
||||
* using the lock while we're invalidating. We record the previously
|
||||
* granted mode so that we can send lock recover responses with the old
|
||||
* granted mode during invalidation.
|
||||
*/
|
||||
static void lock_invalidate_worker(struct work_struct *work)
|
||||
{
|
||||
@@ -692,7 +692,8 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
if (!lock_counts_match(nl->new_mode, lock->users))
|
||||
continue;
|
||||
|
||||
/* set the new mode, no incompatible users during inval */
|
||||
/* set the new mode, no incompatible users during inval, recov needs old */
|
||||
lock->invalidating_mode = lock->mode;
|
||||
lock->mode = nl->new_mode;
|
||||
|
||||
/* move everyone that's ready to our private list */
|
||||
@@ -735,6 +736,8 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
list_del(&ireq->head);
|
||||
kfree(ireq);
|
||||
|
||||
lock->invalidating_mode = SCOUTFS_LOCK_NULL;
|
||||
|
||||
if (list_empty(&lock->inv_list)) {
|
||||
/* finish if another request didn't arrive */
|
||||
list_del_init(&lock->inv_head);
|
||||
@@ -825,6 +828,7 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_net_lock_recover *nlr;
|
||||
enum scoutfs_lock_mode mode;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *next;
|
||||
struct rb_node *node;
|
||||
@@ -845,10 +849,15 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
|
||||
for (i = 0; lock && i < SCOUTFS_NET_LOCK_MAX_RECOVER_NR; i++) {
|
||||
|
||||
if (lock->invalidating_mode != SCOUTFS_LOCK_NULL)
|
||||
mode = lock->invalidating_mode;
|
||||
else
|
||||
mode = lock->mode;
|
||||
|
||||
nlr->locks[i].key = lock->start;
|
||||
nlr->locks[i].write_seq = cpu_to_le64(lock->write_seq);
|
||||
nlr->locks[i].old_mode = lock->mode;
|
||||
nlr->locks[i].new_mode = lock->mode;
|
||||
nlr->locks[i].old_mode = mode;
|
||||
nlr->locks[i].new_mode = mode;
|
||||
|
||||
node = rb_next(&lock->node);
|
||||
if (node)
|
||||
@@ -1050,7 +1059,7 @@ int scoutfs_lock_inode(struct super_block *sb, enum scoutfs_lock_mode mode, int
|
||||
goto out;
|
||||
|
||||
if (flags & SCOUTFS_LKF_REFRESH_INODE) {
|
||||
ret = scoutfs_inode_refresh(inode, *lock, flags);
|
||||
ret = scoutfs_inode_refresh(inode, *lock);
|
||||
if (ret < 0) {
|
||||
scoutfs_unlock(sb, *lock, mode);
|
||||
*lock = NULL;
|
||||
@@ -1243,10 +1252,29 @@ int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode,
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_key_set_zeros(&start);
|
||||
start.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
scoutfs_key_set_ones(&end);
|
||||
end.sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
scoutfs_totl_set_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_xattr_indx_get_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_quota_get_lock_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
@@ -1345,7 +1373,7 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
|
||||
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
enum scoutfs_lock_mode mode)
|
||||
{
|
||||
signed char lock_mode = ACCESS_ONCE(lock->mode);
|
||||
signed char lock_mode = READ_ONCE(lock->mode);
|
||||
|
||||
return lock_modes_match(lock_mode, mode) &&
|
||||
scoutfs_key_compare_ranges(key, key,
|
||||
@@ -1400,6 +1428,17 @@ static void lock_shrink_worker(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long lock_count_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
|
||||
struct super_block *sb = linfo->sb;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_count_objects);
|
||||
|
||||
return shrinker_min_long(linfo->lru_nr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the shrinking process for locks on the lru. If a lock is on
|
||||
* the lru then it can't have any active users. We don't want to block
|
||||
@@ -1412,21 +1451,18 @@ static void lock_shrink_worker(struct work_struct *work)
|
||||
* mode which will prevent the lock from being freed when the null
|
||||
* response arrives.
|
||||
*/
|
||||
static int scoutfs_lock_shrink(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
static unsigned long lock_scan_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct lock_info *linfo = container_of(shrink, struct lock_info,
|
||||
shrinker);
|
||||
struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
unsigned long nr;
|
||||
unsigned long freed = 0;
|
||||
unsigned long nr = sc->nr_to_scan;
|
||||
bool added = false;
|
||||
int ret;
|
||||
|
||||
nr = sc->nr_to_scan;
|
||||
if (nr == 0)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, lock_scan_objects);
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
@@ -1444,6 +1480,7 @@ restart:
|
||||
lock->request_pending = 1;
|
||||
list_add_tail(&lock->shrink_head, &linfo->shrink_list);
|
||||
added = true;
|
||||
freed++;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_shrink_attempted);
|
||||
trace_scoutfs_lock_shrink(sb, lock);
|
||||
@@ -1458,10 +1495,8 @@ restart:
|
||||
if (added)
|
||||
queue_work(linfo->workq, &linfo->shrink_work);
|
||||
|
||||
out:
|
||||
ret = min_t(unsigned long, linfo->lru_nr, INT_MAX);
|
||||
trace_scoutfs_lock_shrink_exit(sb, sc->nr_to_scan, ret);
|
||||
return ret;
|
||||
trace_scoutfs_lock_shrink_exit(sb, sc->nr_to_scan, freed);
|
||||
return freed;
|
||||
}
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb)
|
||||
@@ -1472,7 +1507,7 @@ void scoutfs_free_unused_locks(struct super_block *sb)
|
||||
.nr_to_scan = INT_MAX,
|
||||
};
|
||||
|
||||
linfo->shrinker.shrink(&linfo->shrinker, &sc);
|
||||
lock_scan_objects(KC_SHRINKER_FN(&linfo->shrinker), &sc);
|
||||
}
|
||||
|
||||
static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
@@ -1514,6 +1549,38 @@ void scoutfs_lock_flush_invalidate(struct super_block *sb)
|
||||
flush_work(&linfo->inv_work);
|
||||
}
|
||||
|
||||
static u64 get_held_lock_refresh_gen(struct super_block *sb, struct scoutfs_key *start)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
u64 refresh_gen = 0;
|
||||
|
||||
/* this can be called from all manner of places */
|
||||
if (!linfo)
|
||||
return 0;
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
lock = lock_lookup(sb, start, NULL);
|
||||
if (lock) {
|
||||
if (lock_mode_can_read(lock->mode))
|
||||
refresh_gen = lock->refresh_gen;
|
||||
}
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
return refresh_gen;
|
||||
}
|
||||
|
||||
u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
|
||||
scoutfs_key_set_zeros(&start);
|
||||
start.sk_zone = SCOUTFS_FS_ZONE;
|
||||
start.ski_ino = cpu_to_le64(ino & ~(u64)SCOUTFS_LOCK_INODE_GROUP_MASK);
|
||||
|
||||
return get_held_lock_refresh_gen(sb, &start);
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller is going to be shutting down transactions and the client.
|
||||
* We need to make sure that locking won't call either after we return.
|
||||
@@ -1547,7 +1614,7 @@ void scoutfs_lock_shutdown(struct super_block *sb)
|
||||
trace_scoutfs_lock_shutdown(sb, linfo);
|
||||
|
||||
/* stop the shrinker from queueing work */
|
||||
unregister_shrinker(&linfo->shrinker);
|
||||
KC_UNREGISTER_SHRINKER(&linfo->shrinker);
|
||||
flush_work(&linfo->shrink_work);
|
||||
|
||||
/* cause current and future lock calls to return errors */
|
||||
@@ -1666,9 +1733,9 @@ int scoutfs_lock_setup(struct super_block *sb)
|
||||
spin_lock_init(&linfo->lock);
|
||||
linfo->lock_tree = RB_ROOT;
|
||||
linfo->lock_range_tree = RB_ROOT;
|
||||
linfo->shrinker.shrink = scoutfs_lock_shrink;
|
||||
linfo->shrinker.seeks = DEFAULT_SEEKS;
|
||||
register_shrinker(&linfo->shrinker);
|
||||
KC_INIT_SHRINKER_FUNCS(&linfo->shrinker, lock_count_objects,
|
||||
lock_scan_objects);
|
||||
KC_REGISTER_SHRINKER(&linfo->shrinker, "scoutfs-lock:" SCSBF, SCSB_ARGS(sb));
|
||||
INIT_LIST_HEAD(&linfo->lru_list);
|
||||
INIT_WORK(&linfo->inv_work, lock_invalidate_worker);
|
||||
INIT_LIST_HEAD(&linfo->inv_list);
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID
|
||||
|
||||
struct scoutfs_omap_lock;
|
||||
struct inode_deletion_lock_data;
|
||||
|
||||
/*
|
||||
* A few fields (start, end, refresh_gen, write_seq, granted_mode)
|
||||
@@ -39,6 +39,7 @@ struct scoutfs_lock {
|
||||
struct list_head cov_list;
|
||||
|
||||
enum scoutfs_lock_mode mode;
|
||||
enum scoutfs_lock_mode invalidating_mode;
|
||||
unsigned int waiters[SCOUTFS_LOCK_NR_MODES];
|
||||
unsigned int users[SCOUTFS_LOCK_NR_MODES];
|
||||
|
||||
@@ -47,9 +48,8 @@ struct scoutfs_lock {
|
||||
/* the forest tracks which log tree last saw bloom bit updates */
|
||||
atomic64_t forest_bloom_nr;
|
||||
|
||||
/* open ino mapping has a valid map for a held write lock */
|
||||
spinlock_t omap_spinlock;
|
||||
struct scoutfs_omap_lock_data *omap_data;
|
||||
/* inode deletion tracks some state per lock */
|
||||
struct inode_deletion_lock_data *inode_deletion_data;
|
||||
};
|
||||
|
||||
struct scoutfs_lock_coverage {
|
||||
@@ -86,6 +86,10 @@ int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int
|
||||
u64 ino, struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
@@ -100,6 +104,8 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
|
||||
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino);
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb);
|
||||
|
||||
int scoutfs_lock_setup(struct super_block *sb);
|
||||
|
||||
@@ -153,30 +153,30 @@ enum {
|
||||
*/
|
||||
static void add_client_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
struct client_lock_entry *clent)
|
||||
struct client_lock_entry *c_ent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (list_empty(&clent->head))
|
||||
list_add_tail(&clent->head, list);
|
||||
if (list_empty(&c_ent->head))
|
||||
list_add_tail(&c_ent->head, list);
|
||||
else
|
||||
list_move_tail(&clent->head, list);
|
||||
list_move_tail(&c_ent->head, list);
|
||||
|
||||
clent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
c_ent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
list == &snode->requested ? OL_REQUESTED :
|
||||
OL_INVALIDATED;
|
||||
}
|
||||
|
||||
static void free_client_entry(struct lock_server_info *inf,
|
||||
struct server_lock_node *snode,
|
||||
struct client_lock_entry *clent)
|
||||
struct client_lock_entry *c_ent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (!list_empty(&clent->head))
|
||||
list_del_init(&clent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &clent->tseq_entry);
|
||||
kfree(clent);
|
||||
if (!list_empty(&c_ent->head))
|
||||
list_del_init(&c_ent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
kfree(c_ent);
|
||||
}
|
||||
|
||||
static bool invalid_mode(u8 mode)
|
||||
@@ -202,21 +202,48 @@ static u8 invalidation_mode(u8 granted, u8 requested)
|
||||
|
||||
/*
|
||||
* Return true if the client lock instances described by the entries can
* be granted at the same time. Typically this only means they're both
* modes that are compatible between nodes. In addition there's the
* special case where a read lock on a client is compatible with a write
* lock on the same client because the client's cache covered by the
* read lock is still valid if they get a write lock.
* be granted at the same time. There are only three cases where this is
* true.
|
||||
*
|
||||
* First, the two locks are both of the same mode that allows full
|
||||
* sharing -- read and write only. The only point of these modes is
|
||||
* that everyone can share them.
|
||||
*
|
||||
* Second, a write lock gives the client permission to read as well.
|
||||
* This means that a client can upgrade its read lock to a write lock
|
||||
* without having to invalidate the existing read and drop caches.
|
||||
*
|
||||
* Third, null locks are always compatible between clients. It's as
|
||||
* though the client with the null lock has no lock at all. But it's
|
||||
* never compatible with all locks on the client requesting null.
|
||||
* Sending invalidations for existing locks on a client when we get a
|
||||
* null request is how we resolve races in shrinking locks -- we turn it
|
||||
* into the unsolicited remote invalidation case.
|
||||
*
|
||||
* All other mode and client combinations can not be shared, most
|
||||
* typically a write lock invalidating all other non-write holders to
|
||||
* drop caches and force a read after the write has completed.
|
||||
*/
|
||||
static bool client_entries_compatible(struct client_lock_entry *granted,
|
||||
struct client_lock_entry *requested)
|
||||
{
|
||||
return (granted->mode == requested->mode &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ ||
|
||||
granted->mode == SCOUTFS_LOCK_WRITE_ONLY)) ||
|
||||
(granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE);
|
||||
/* only read and write_only can be full shared */
|
||||
if ((granted->mode == requested->mode) &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ || granted->mode == SCOUTFS_LOCK_WRITE_ONLY))
|
||||
return true;
|
||||
|
||||
/* _write includes reading, so a client can upgrade its read to write */
|
||||
if (granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE)
|
||||
return true;
|
||||
|
||||
/* null is always compatible across clients, never within a client */
|
||||
if ((granted->rid != requested->rid) &&
|
||||
(granted->mode == SCOUTFS_LOCK_NULL || requested->mode == SCOUTFS_LOCK_NULL))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -317,16 +344,18 @@ static void put_server_lock(struct lock_server_info *inf,
|
||||
|
||||
BUG_ON(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
|
||||
if (atomic_dec_and_test(&snode->refcount) &&
|
||||
list_empty(&snode->granted) &&
|
||||
list_empty(&snode->requested) &&
|
||||
list_empty(&snode->invalidated)) {
|
||||
spin_lock(&inf->lock);
|
||||
rb_erase(&snode->node, &inf->locks_root);
|
||||
spin_unlock(&inf->lock);
|
||||
should_free = true;
|
||||
}
|
||||
|
||||
spin_unlock(&inf->lock);
|
||||
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
if (should_free) {
|
||||
@@ -339,13 +368,13 @@ static struct client_lock_entry *find_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
u64 rid)
|
||||
{
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
list_for_each_entry(clent, list, head) {
|
||||
if (clent->rid == rid)
|
||||
return clent;
|
||||
list_for_each_entry(c_ent, list, head) {
|
||||
if (c_ent->rid == rid)
|
||||
return c_ent;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@@ -364,7 +393,7 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
u64 net_id, struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -376,29 +405,29 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = net_id;
|
||||
clent->mode = nl->new_mode;
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = net_id;
|
||||
c_ent->mode = nl->new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nl->key);
|
||||
if (snode == NULL) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
snode->stats[SLT_REQUEST]++;
|
||||
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
out:
|
||||
@@ -417,7 +446,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -438,18 +467,18 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
|
||||
snode->stats[SLT_RESPONSE]++;
|
||||
|
||||
clent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!clent) {
|
||||
c_ent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!c_ent) {
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nl->new_mode == SCOUTFS_LOCK_NULL) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
} else {
|
||||
clent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
c_ent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
}
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
@@ -632,7 +661,7 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *existing;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
int ret = 0;
|
||||
@@ -652,35 +681,35 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
}
|
||||
|
||||
for (i = 0; i < le16_to_cpu(nlr->nr); i++) {
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = 0;
|
||||
clent->mode = nlr->locks[i].new_mode;
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = 0;
|
||||
c_ent->mode = nlr->locks[i].new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nlr->locks[i].key);
|
||||
if (snode == NULL) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
existing = find_entry(snode, &snode->granted, rid);
|
||||
if (existing) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EEXIST;
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
|
||||
put_server_lock(inf, snode);
|
||||
|
||||
@@ -707,7 +736,7 @@ out:
|
||||
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *tmp;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
@@ -724,9 +753,9 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
(list == &snode->requested) ? &snode->invalidated :
|
||||
NULL) {
|
||||
|
||||
list_for_each_entry_safe(clent, tmp, list, head) {
|
||||
if (clent->rid == rid) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
list_for_each_entry_safe(c_ent, tmp, list, head) {
|
||||
if (c_ent->rid == rid) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
freed = true;
|
||||
}
|
||||
}
|
||||
@@ -749,7 +778,7 @@ out:
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "lock server err %d during client rid %016llx farewell, shutting down",
|
||||
ret, rid);
|
||||
scoutfs_server_abort(sb);
|
||||
scoutfs_server_stop(sb);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -787,15 +816,15 @@ static char *lock_on_list_string(u8 on_list)
|
||||
static void lock_server_tseq_show(struct seq_file *m,
|
||||
struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
struct client_lock_entry *clent = container_of(ent,
|
||||
struct client_lock_entry *c_ent = container_of(ent,
|
||||
struct client_lock_entry,
|
||||
tseq_entry);
|
||||
struct server_lock_node *snode = clent->snode;
|
||||
struct server_lock_node *snode = c_ent->snode;
|
||||
|
||||
seq_printf(m, SK_FMT" %s %s rid %016llx net_id %llu\n",
|
||||
SK_ARG(&snode->key), lock_mode_string(clent->mode),
|
||||
lock_on_list_string(clent->on_list), clent->rid,
|
||||
clent->net_id);
|
||||
SK_ARG(&snode->key), lock_mode_string(c_ent->mode),
|
||||
lock_on_list_string(c_ent->on_list), c_ent->rid,
|
||||
c_ent->net_id);
|
||||
}
|
||||
|
||||
static void stats_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
@@ -857,7 +886,7 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct server_lock_node *snode;
|
||||
struct server_lock_node *stmp;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *ctmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
@@ -873,8 +902,8 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
list_splice_init(&snode->invalidated, &list);
|
||||
|
||||
mutex_lock(&snode->mutex);
|
||||
list_for_each_entry_safe(clent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
list_for_each_entry_safe(c_ent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
}
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
|
||||
616
kmod/src/net.c
@@ -20,6 +20,7 @@
|
||||
#include <net/sock.h>
|
||||
#include <net/tcp.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/jhash.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
@@ -31,6 +32,7 @@
|
||||
#include "endian_swap.h"
|
||||
#include "tseq.h"
|
||||
#include "fence.h"
|
||||
#include "options.h"
|
||||
|
||||
/*
|
||||
* scoutfs networking delivers requests and responses between nodes.
|
||||
@@ -134,6 +136,7 @@ struct message_send {
|
||||
struct message_recv {
|
||||
struct scoutfs_tseq_entry tseq_entry;
|
||||
struct work_struct proc_work;
|
||||
struct list_head ordered_head;
|
||||
struct scoutfs_net_connection *conn;
|
||||
struct scoutfs_net_header nh;
|
||||
};
|
||||
@@ -332,7 +335,7 @@ static int submit_send(struct super_block *sb,
|
||||
return -EINVAL;
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
return -ENOLINK;
|
||||
|
||||
msend = kmalloc(offsetof(struct message_send,
|
||||
nh.data[data_len]), GFP_NOFS);
|
||||
@@ -355,6 +358,7 @@ static int submit_send(struct super_block *sb,
|
||||
}
|
||||
if (rid != 0) {
|
||||
spin_unlock(&conn->lock);
|
||||
kfree(msend);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
}
|
||||
@@ -497,16 +501,61 @@ static void scoutfs_net_proc_worker(struct work_struct *work)
|
||||
trace_scoutfs_net_proc_work_exit(sb, 0, ret);
|
||||
}
|
||||
|
||||
static void scoutfs_net_ordered_proc_worker(struct work_struct *work)
|
||||
{
|
||||
struct scoutfs_work_list *wlist = container_of(work, struct scoutfs_work_list, work);
|
||||
struct message_recv *mrecv;
|
||||
struct message_recv *mrecv__;
|
||||
LIST_HEAD(list);
|
||||
|
||||
spin_lock(&wlist->lock);
|
||||
list_splice_init(&wlist->list, &list);
|
||||
spin_unlock(&wlist->lock);
|
||||
|
||||
list_for_each_entry_safe(mrecv, mrecv__, &list, ordered_head) {
|
||||
list_del_init(&mrecv->ordered_head);
|
||||
scoutfs_net_proc_worker(&mrecv->proc_work);
|
||||
}
|
||||
}
|
||||
|
||||
/*
* Some messages require in-order processing. But the scope of the
* ordering isn't global. In the case of lock messages, it's per lock.
* So for these messages we hash them to a number of ordered workers who
* walk a list and call the usual work function in order. This replaced
* first the proc work detecting out-of-order delivery and re-ordering,
* and then only calling proc from the single recv work context.
*/
static void queue_ordered_proc(struct scoutfs_net_connection *conn, struct message_recv *mrecv)
|
||||
{
|
||||
struct scoutfs_work_list *wlist;
|
||||
struct scoutfs_net_lock *nl;
|
||||
u32 h;
|
||||
|
||||
if (WARN_ON_ONCE(mrecv->nh.cmd != SCOUTFS_NET_CMD_LOCK ||
|
||||
le16_to_cpu(mrecv->nh.data_len) != sizeof(struct scoutfs_net_lock)))
|
||||
return scoutfs_net_proc_worker(&mrecv->proc_work);
|
||||
|
||||
nl = (void *)mrecv->nh.data;
|
||||
h = jhash(&nl->key, sizeof(struct scoutfs_key), 0x6fdd3cd5);
|
||||
wlist = &conn->ordered_proc_wlists[h % conn->ordered_proc_nr];
|
||||
|
||||
spin_lock(&wlist->lock);
|
||||
list_add_tail(&mrecv->ordered_head, &wlist->list);
|
||||
spin_unlock(&wlist->lock);
|
||||
queue_work(conn->workq, &wlist->work);
|
||||
}
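
The dispatch idea can be shown in isolation: messages that hash to the same bucket are appended to, and drained from, a single list, so they stay in arrival order even when different buckets are processed concurrently. A small userspace sketch follows; the hash function and bucket count here are arbitrary stand-ins (the kernel code above uses jhash() on the lock key).

/*
 * Illustrative only: per-key ordered work dispatch.  NR_BUCKETS and the
 * multiplicative hash are assumptions, not scoutfs constants.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 8

static unsigned int bucket_for_key(uint64_t key)
{
	/* any stable hash works; same key always picks the same bucket */
	return (unsigned int)((key * 0x9e3779b97f4a7c15ULL) >> 61) % NR_BUCKETS;
}

int main(void)
{
	uint64_t keys[] = { 1, 2, 1, 3, 1 };
	size_t i;

	/* all three messages for key 1 land in one bucket, preserving order */
	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		printf("key %llu -> bucket %u\n",
		       (unsigned long long)keys[i], bucket_for_key(keys[i]));
	return 0;
}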
|
||||
|
||||
/*
|
||||
* Free live responses up to and including the seq by marking them dead
|
||||
* and moving them to the send queue to be freed.
|
||||
*/
|
||||
static int move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list, u64 seq)
|
||||
static bool move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list, u64 seq)
|
||||
{
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
int ret = 0;
|
||||
bool moved = false;
|
||||
|
||||
assert_spin_locked(&conn->lock);
|
||||
|
||||
@@ -518,20 +567,20 @@ static int move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
|
||||
msend->dead = 1;
|
||||
list_move(&msend->head, &conn->send_queue);
|
||||
ret = 1;
|
||||
moved = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return moved;
|
||||
}
|
||||
|
||||
/* acks are processed inline in the recv worker */
|
||||
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
|
||||
{
|
||||
int moved;
|
||||
bool moved;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
moved = move_acked_responses(conn, &conn->send_queue, seq) +
|
||||
moved = move_acked_responses(conn, &conn->send_queue, seq) |
|
||||
move_acked_responses(conn, &conn->resend_queue, seq);
|
||||
|
||||
spin_unlock(&conn->lock);
|
||||
@@ -540,29 +589,17 @@ static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
|
||||
queue_work(conn->workq, &conn->send_work);
|
||||
}
|
||||
|
||||
static int recvmsg_full(struct socket *sock, void *buf, unsigned len)
|
||||
static int k_recvmsg(struct socket *sock, void *buf, unsigned len)
|
||||
{
|
||||
struct msghdr msg;
|
||||
struct kvec kv;
|
||||
int ret;
|
||||
struct kvec kv = {
|
||||
.iov_base = buf,
|
||||
.iov_len = len,
|
||||
};
|
||||
struct msghdr msg = {
|
||||
.msg_flags = MSG_NOSIGNAL,
|
||||
};
|
||||
|
||||
while (len) {
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_flags = MSG_NOSIGNAL;
|
||||
kv.iov_base = buf;
|
||||
kv.iov_len = len;
|
||||
|
||||
ret = kernel_recvmsg(sock, &msg, &kv, 1, len, msg.msg_flags);
|
||||
if (ret <= 0)
|
||||
return -ECONNABORTED;
|
||||
|
||||
len -= ret;
|
||||
buf += ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return kernel_recvmsg(sock, &msg, &kv, 1, len, msg.msg_flags);
|
||||
}
|
||||
|
||||
static bool invalid_message(struct scoutfs_net_connection *conn,
|
||||
@@ -599,6 +636,72 @@ static bool invalid_message(struct scoutfs_net_connection *conn,
|
||||
return false;
|
||||
}
|
||||
|
||||
static int recv_one_message(struct super_block *sb, struct net_info *ninf,
|
||||
struct scoutfs_net_connection *conn, struct scoutfs_net_header *nh,
|
||||
unsigned int data_len)
|
||||
{
|
||||
struct message_recv *mrecv;
|
||||
int ret;
|
||||
|
||||
scoutfs_inc_counter(sb, net_recv_messages);
|
||||
scoutfs_add_counter(sb, net_recv_bytes, nh_bytes(data_len));
|
||||
trace_scoutfs_net_recv_message(sb, &conn->sockname, &conn->peername, nh);
|
||||
|
||||
/* caller's invalid message checked data len */
|
||||
mrecv = kmalloc(offsetof(struct message_recv, nh.data[data_len]), GFP_NOFS);
|
||||
if (!mrecv) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mrecv->conn = conn;
|
||||
INIT_WORK(&mrecv->proc_work, scoutfs_net_proc_worker);
|
||||
INIT_LIST_HEAD(&mrecv->ordered_head);
|
||||
mrecv->nh = *nh;
|
||||
if (data_len)
|
||||
memcpy(mrecv->nh.data, (nh + 1), data_len);
|
||||
|
||||
if (nh->cmd == SCOUTFS_NET_CMD_GREETING) {
|
||||
/* greetings are out of band, no seq mechanics */
|
||||
set_conn_fl(conn, saw_greeting);
|
||||
|
||||
} else if (le64_to_cpu(nh->seq) <=
|
||||
atomic64_read(&conn->recv_seq)) {
|
||||
/* drop any resent duplicated messages */
|
||||
scoutfs_inc_counter(sb, net_recv_dropped_duplicate);
|
||||
kfree(mrecv);
|
||||
ret = 0;
|
||||
goto out;
|
||||
|
||||
} else {
|
||||
/* record that we've received sender's seq */
|
||||
atomic64_set(&conn->recv_seq, le64_to_cpu(nh->seq));
|
||||
/* and free our responses that sender has received */
|
||||
free_acked_responses(conn, le64_to_cpu(nh->recv_seq));
|
||||
}
|
||||
|
||||
scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);
|
||||
|
||||
/*
|
||||
* Initial received greetings are processed inline
|
||||
* before any other incoming messages.
|
||||
*
|
||||
* Incoming requests or responses to the lock client
|
||||
* can't handle re-ordering, so they're queued to
|
||||
* ordered receive processing work.
|
||||
*/
|
||||
if (nh->cmd == SCOUTFS_NET_CMD_GREETING)
|
||||
scoutfs_net_proc_worker(&mrecv->proc_work);
|
||||
else if (nh->cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn)
|
||||
queue_ordered_proc(conn, mrecv);
|
||||
else
|
||||
queue_work(conn->workq, &mrecv->proc_work);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Always block receiving from the socket. Errors trigger shutting down
|
||||
* the connection.
|
||||
@@ -609,86 +712,72 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
|
||||
struct super_block *sb = conn->sb;
|
||||
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
|
||||
struct socket *sock = conn->sock;
|
||||
struct scoutfs_net_header nh;
|
||||
struct message_recv *mrecv;
|
||||
struct scoutfs_net_header *nh;
|
||||
struct page *page = NULL;
|
||||
unsigned int data_len;
|
||||
int hdr_off;
|
||||
int rx_off;
|
||||
int size;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_recv_work_enter(sb, 0, 0);
|
||||
|
||||
page = alloc_page(GFP_NOFS);
|
||||
if (!page) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
hdr_off = 0;
|
||||
rx_off = 0;
|
||||
|
||||
for (;;) {
|
||||
/* receive the header */
|
||||
ret = recvmsg_full(sock, &nh, sizeof(nh));
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
/* receiving an invalid message breaks the connection */
|
||||
if (invalid_message(conn, &nh)) {
|
||||
scoutfs_inc_counter(sb, net_recv_invalid_message);
|
||||
ret = -EBADMSG;
|
||||
break;
|
||||
ret = k_recvmsg(sock, page_address(page) + rx_off, PAGE_SIZE - rx_off);
|
||||
if (ret <= 0) {
|
||||
ret = -ECONNABORTED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
data_len = le16_to_cpu(nh.data_len);
|
||||
rx_off += ret;
|
||||
|
||||
scoutfs_inc_counter(sb, net_recv_messages);
|
||||
scoutfs_add_counter(sb, net_recv_bytes, nh_bytes(data_len));
|
||||
trace_scoutfs_net_recv_message(sb, &conn->sockname,
|
||||
&conn->peername, &nh);
|
||||
for (;;) {
|
||||
size = rx_off - hdr_off;
|
||||
if (size < sizeof(struct scoutfs_net_header))
|
||||
break;
|
||||
|
||||
/* invalid message checked data len */
|
||||
mrecv = kmalloc(offsetof(struct message_recv,
|
||||
nh.data[data_len]), GFP_NOFS);
|
||||
if (!mrecv) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
nh = page_address(page) + hdr_off;
|
||||
|
||||
/* receiving an invalid message breaks the connection */
|
||||
if (invalid_message(conn, nh)) {
|
||||
scoutfs_inc_counter(sb, net_recv_invalid_message);
|
||||
ret = -EBADMSG;
|
||||
break;
|
||||
}
|
||||
|
||||
data_len = le16_to_cpu(nh->data_len);
|
||||
if (sizeof(struct scoutfs_net_header) + data_len > size)
|
||||
break;
|
||||
|
||||
ret = recv_one_message(sb, ninf, conn, nh, data_len);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
hdr_off += sizeof(struct scoutfs_net_header) + data_len;
|
||||
}
|
||||
|
||||
mrecv->conn = conn;
|
||||
INIT_WORK(&mrecv->proc_work, scoutfs_net_proc_worker);
|
||||
mrecv->nh = nh;
|
||||
|
||||
/* receive the data payload */
|
||||
ret = recvmsg_full(sock, mrecv->nh.data, data_len);
|
||||
if (ret) {
|
||||
kfree(mrecv);
|
||||
break;
|
||||
if ((PAGE_SIZE - rx_off) <
|
||||
(sizeof(struct scoutfs_net_header) + SCOUTFS_NET_MAX_DATA_LEN)) {
|
||||
if (size)
|
||||
memmove(page_address(page), page_address(page) + hdr_off, size);
|
||||
hdr_off = 0;
|
||||
rx_off = size;
|
||||
}
|
||||
|
||||
if (nh.cmd == SCOUTFS_NET_CMD_GREETING) {
|
||||
/* greetings are out of band, no seq mechanics */
|
||||
set_conn_fl(conn, saw_greeting);
|
||||
|
||||
} else if (le64_to_cpu(nh.seq) <=
|
||||
atomic64_read(&conn->recv_seq)) {
|
||||
/* drop any resent duplicated messages */
|
||||
scoutfs_inc_counter(sb, net_recv_dropped_duplicate);
|
||||
kfree(mrecv);
|
||||
continue;
|
||||
|
||||
} else {
|
||||
/* record that we've received sender's seq */
|
||||
atomic64_set(&conn->recv_seq, le64_to_cpu(nh.seq));
|
||||
/* and free our responses that sender has received */
|
||||
free_acked_responses(conn, le64_to_cpu(nh.recv_seq));
|
||||
}
|
||||
|
||||
scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);
|
||||
|
||||
/*
|
||||
* Initial received greetings are processed
|
||||
* synchronously before any other incoming messages.
|
||||
*
|
||||
* Incoming requests or responses to the lock client are
|
||||
* called synchronously to avoid reordering.
|
||||
*/
|
||||
if (nh.cmd == SCOUTFS_NET_CMD_GREETING ||
|
||||
(nh.cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn))
|
||||
scoutfs_net_proc_worker(&mrecv->proc_work);
|
||||
else
|
||||
queue_work(conn->workq, &mrecv->proc_work);
|
||||
}
|
||||
|
||||
out:
|
||||
__free_page(page);
|
||||
|
||||
if (ret)
|
||||
scoutfs_inc_counter(sb, net_recv_error);
|
||||
|
||||
@@ -698,29 +787,41 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
|
||||
trace_scoutfs_net_recv_work_exit(sb, 0, ret);
|
||||
}
|
||||
|
||||
static int sendmsg_full(struct socket *sock, void *buf, unsigned len)
|
||||
/*
|
||||
* This consumes the kvec.
|
||||
*/
|
||||
static int k_sendmsg_full(struct socket *sock, struct kvec *kv, unsigned long nr_segs, size_t count)
|
||||
{
|
||||
struct msghdr msg;
|
||||
struct kvec kv;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
while (len) {
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_flags = MSG_NOSIGNAL;
|
||||
kv.iov_base = buf;
|
||||
kv.iov_len = len;
|
||||
while (count > 0) {
|
||||
struct msghdr msg = {
|
||||
.msg_flags = MSG_NOSIGNAL,
|
||||
};
|
||||
|
||||
ret = kernel_sendmsg(sock, &msg, &kv, 1, len);
|
||||
if (ret <= 0)
|
||||
return -ECONNABORTED;
|
||||
ret = kernel_sendmsg(sock, &msg, kv, nr_segs, count);
|
||||
if (ret <= 0) {
|
||||
ret = -ECONNABORTED;
|
||||
break;
|
||||
}
|
||||
|
||||
len -= ret;
|
||||
buf += ret;
|
||||
count -= ret;
|
||||
if (count) {
|
||||
while (nr_segs > 0 && ret >= kv->iov_len) {
|
||||
ret -= kv->iov_len;
|
||||
kv++;
|
||||
nr_segs--;
|
||||
}
|
||||
if (nr_segs > 0 && ret > 0) {
|
||||
kv->iov_base += ret;
|
||||
kv->iov_len -= ret;
|
||||
}
|
||||
BUG_ON(nr_segs == 0);
|
||||
}
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
}
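
The partial-send bookkeeping that "consumes the kvec" can be sketched on its own. This is a stand-alone userspace illustration of advancing a kvec array past bytes already transmitted, not the kernel code path.

/*
 * After a short send of `sent` bytes: skip fully transmitted segments,
 * then trim the first partially sent one.  struct kvec is redefined
 * locally for the sketch.
 */
#include <stddef.h>
#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };

static void advance_kvec(struct kvec **kvp, unsigned long *nr_segs, size_t sent)
{
	struct kvec *kv = *kvp;

	while (*nr_segs > 0 && sent >= kv->iov_len) {
		sent -= kv->iov_len;
		kv++;
		(*nr_segs)--;
	}
	if (*nr_segs > 0 && sent > 0) {
		kv->iov_base = (char *)kv->iov_base + sent;
		kv->iov_len -= sent;
	}
	*kvp = kv;
}

int main(void)
{
	char a[100], b[200], c[300];
	struct kvec segs[] = {
		{ a, sizeof(a) }, { b, sizeof(b) }, { c, sizeof(c) },
	};
	struct kvec *kv = segs;
	unsigned long nr = 3;

	/* pretend sendmsg() returned 150: segment a is done, b is half sent */
	advance_kvec(&kv, &nr, 150);
	printf("segments left %lu, first segment len %zu\n", nr, kv->iov_len);
	return 0;
}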
|
||||
|
||||
static void free_msend(struct net_info *ninf, struct message_send *msend)
|
||||
@@ -751,54 +852,73 @@ static void scoutfs_net_send_worker(struct work_struct *work)
|
||||
struct super_block *sb = conn->sb;
|
||||
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
|
||||
struct message_send *msend;
|
||||
int ret = 0;
|
||||
struct message_send *_msend_;
|
||||
struct kvec kv[16];
|
||||
unsigned long nr_segs;
|
||||
size_t count;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_send_work_enter(sb, 0, 0);
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
while ((msend = list_first_entry_or_null(&conn->send_queue,
|
||||
struct message_send, head))) {
|
||||
|
||||
if (msend->dead) {
|
||||
free_msend(ninf, msend);
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((msend->nh.cmd == SCOUTFS_NET_CMD_FAREWELL) &&
|
||||
nh_is_response(&msend->nh)) {
|
||||
set_conn_fl(conn, saw_farewell);
|
||||
}
|
||||
|
||||
msend->nh.recv_seq =
|
||||
cpu_to_le64(atomic64_read(&conn->recv_seq));
|
||||
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
len = nh_bytes(le16_to_cpu(msend->nh.data_len));
|
||||
|
||||
scoutfs_inc_counter(sb, net_send_messages);
|
||||
scoutfs_add_counter(sb, net_send_bytes, len);
|
||||
trace_scoutfs_net_send_message(sb, &conn->sockname,
|
||||
&conn->peername, &msend->nh);
|
||||
|
||||
ret = sendmsg_full(conn->sock, &msend->nh, len);
|
||||
for (;;) {
|
||||
nr_segs = 0;
|
||||
count = 0;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
|
||||
if (msend->dead) {
|
||||
free_msend(ninf, msend);
|
||||
continue;
|
||||
}
|
||||
|
||||
msend->nh.recv_seq = 0;
|
||||
len = nh_bytes(le16_to_cpu(msend->nh.data_len));
|
||||
|
||||
if (ret)
|
||||
break;
|
||||
if ((msend->nh.cmd == SCOUTFS_NET_CMD_FAREWELL) &&
|
||||
nh_is_response(&msend->nh)) {
|
||||
set_conn_fl(conn, saw_farewell);
|
||||
}
|
||||
|
||||
/* resend if it wasn't freed while we sent */
|
||||
if (!msend->dead)
|
||||
list_move_tail(&msend->head, &conn->resend_queue);
|
||||
msend->nh.recv_seq = cpu_to_le64(atomic64_read(&conn->recv_seq));
|
||||
|
||||
scoutfs_inc_counter(sb, net_send_messages);
|
||||
scoutfs_add_counter(sb, net_send_bytes, len);
|
||||
trace_scoutfs_net_send_message(sb, &conn->sockname,
|
||||
&conn->peername, &msend->nh);
|
||||
|
||||
count += len;
|
||||
kv[nr_segs].iov_base = &msend->nh;
|
||||
kv[nr_segs].iov_len = len;
|
||||
if (++nr_segs == ARRAY_SIZE(kv))
|
||||
break;
|
||||
|
||||
}
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
if (nr_segs == 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = k_sendmsg_full(conn->sock, kv, nr_segs, count);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
list_for_each_entry_safe(msend, _msend_, &conn->send_queue, head) {
|
||||
msend->nh.recv_seq = 0;
|
||||
|
||||
/* resend if it wasn't freed while we sent */
|
||||
if (!msend->dead)
|
||||
list_move_tail(&msend->head, &conn->resend_queue);
|
||||
|
||||
if (--nr_segs == 0)
|
||||
break;
|
||||
}
|
||||
spin_unlock(&conn->lock);
|
||||
}
|
||||
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
out:
|
||||
if (ret) {
|
||||
scoutfs_inc_counter(sb, net_send_error);
|
||||
shutdown_conn(conn);
|
||||
@@ -835,17 +955,9 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
|
||||
if (conn->listening_conn && conn->notify_down)
|
||||
conn->notify_down(sb, conn, conn->info, conn->rid);
|
||||
|
||||
/*
|
||||
* Usually networking is idle and we destroy pending sends, but when forcing unmount
|
||||
* we can have to wake up waiters by failing pending sends.
|
||||
*/
|
||||
list_splice_init(&conn->resend_queue, &conn->send_queue);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head) {
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
call_resp_func(sb, conn, msend->resp_func, msend->resp_data,
|
||||
NULL, 0, -ECONNABORTED);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head)
|
||||
free_msend(ninf, msend);
|
||||
}
|
||||
|
||||
/* accepted sockets are removed from their listener's list */
|
||||
if (conn->listening_conn) {
|
||||
@@ -861,6 +973,7 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
|
||||
destroy_workqueue(conn->workq);
|
||||
scoutfs_tseq_del(&ninf->conn_tseq_tree, &conn->tseq_entry);
|
||||
kfree(conn->info);
|
||||
kfree(conn->ordered_proc_wlists);
|
||||
trace_scoutfs_conn_destroy_free(conn);
|
||||
kfree(conn);
|
||||
|
||||
@@ -886,7 +999,7 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
|
||||
* The TCP_KEEP* and TCP_USER_TIMEOUT option interaction is subtle.
* TCP_USER_TIMEOUT only applies if there is unacked written data in the
* send queue. It doesn't work if the connection is idle. Adding
* keepalice probes with user_timeout set changes how the keepalive
* keepalive probes with user_timeout set changes how the keepalive
* timeout is calculated. CNT no longer matters. Each time
* additional probes (not the first) are sent the user timeout is
* checked against the last time data was received. If none of the
@@ -898,79 +1011,65 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
* elapses during the probe timer processing after the unsuccessful
* probes.
*/
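
The timing described above can be sanity-checked with back-of-the-envelope arithmetic. The sketch below uses the same shape of values the code derives: the 10 second timeout and 3 probes from the old UNRESPONSIVE_* defines (the new code takes the timeout from tcp_keepalive_timeout_ms), a one second probe interval, and keepidle set to timeout minus probes. It is an approximation, not the kernel's exact TCP state machine.

/*
 * Rough keepalive timing: probes start after keepidle seconds of
 * silence, and the user timeout elapses while they are being sent.
 */
#include <stdio.h>

int main(void)
{
	int timeout_secs = 10;			/* e.g. tcp_keepalive_timeout_ms / 1000 */
	int probes = 3;				/* UNRESPONSIVE_PROBES */
	int keepidle = timeout_secs - probes;	/* idle seconds before first probe */
	int keepintvl = 1;			/* seconds between probes */
	int i;

	for (i = 0; i < probes; i++)
		printf("probe %d at ~%d seconds of silence\n",
		       i + 1, keepidle + i * keepintvl);

	/* the user timeout is then exceeded during probe timer processing */
	printf("user timeout of %d seconds exceeded shortly after\n", timeout_secs);
	return 0;
}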
#define UNRESPONSIVE_TIMEOUT_SECS 10
|
||||
#define UNRESPONSIVE_PROBES 3
|
||||
static int sock_opts_and_names(struct scoutfs_net_connection *conn,
|
||||
static int sock_opts_and_names(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct socket *sock)
|
||||
{
|
||||
struct timeval tv;
|
||||
int addrlen;
|
||||
struct scoutfs_mount_options opts;
|
||||
int optval;
|
||||
int ret;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
/* we use a keepalive timeout instead of send timeout */
|
||||
tv.tv_sec = 0;
|
||||
tv.tv_usec = 0;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
ret = kc_sock_set_sndtimeo(sock, 0);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* not checked when user_timeout != 0, but for clarity */
|
||||
optval = UNRESPONSIVE_PROBES;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_sock_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
BUILD_BUG_ON(UNRESPONSIVE_PROBES >= UNRESPONSIVE_TIMEOUT_SECS);
|
||||
optval = UNRESPONSIVE_TIMEOUT_SECS - (UNRESPONSIVE_PROBES);
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
|
||||
(char *)&optval, sizeof(optval));
|
||||
optval = (opts.tcp_keepalive_timeout_ms / MSEC_PER_SEC) - UNRESPONSIVE_PROBES;
|
||||
ret = kc_tcp_sock_set_keepidle(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_tcp_sock_set_keepintvl(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
|
||||
(char *)&optval, sizeof(optval));
|
||||
optval = opts.tcp_keepalive_timeout_ms;
|
||||
ret = kc_tcp_sock_set_user_timeout(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_tcp_sock_set_nodelay(sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getsockname(sock, (struct sockaddr *)&conn->sockname,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
if (ret)
|
||||
ret = kc_kernel_getsockname(sock, (struct sockaddr *)&conn->sockname);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getpeername(sock, (struct sockaddr *)&conn->peername,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
if (ret)
|
||||
ret = kc_kernel_getpeername(sock, (struct sockaddr *)&conn->peername);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
|
||||
conn->last_peername = conn->peername;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@@ -999,6 +1098,8 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
acc_sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
/* inherit accepted request funcs from listening conn */
|
||||
acc_conn = scoutfs_net_alloc_conn(sb, conn->notify_up,
|
||||
conn->notify_down,
|
||||
@@ -1010,7 +1111,7 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = sock_opts_and_names(acc_conn, acc_sock);
|
||||
ret = sock_opts_and_names(sb, acc_conn, acc_sock);
|
||||
if (ret) {
|
||||
sock_release(acc_sock);
|
||||
destroy_conn(acc_conn);
|
||||
@@ -1052,20 +1153,18 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
|
||||
DEFINE_CONN_FROM_WORK(conn, work, connect_work);
|
||||
struct super_block *sb = conn->sb;
|
||||
struct socket *sock;
|
||||
struct timeval tv;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_connect_work_enter(sb, 0, 0);
|
||||
|
||||
ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* caller specified connect timeout */
|
||||
tv.tv_sec = conn->connect_timeout_ms / MSEC_PER_SEC;
|
||||
tv.tv_usec = (conn->connect_timeout_ms % MSEC_PER_SEC) * USEC_PER_MSEC;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
/* caller specified connect timeout, defaults to 1 sec */
|
||||
ret = kc_sock_set_sndtimeo(sock, conn->connect_timeout_ms / MSEC_PER_SEC);
|
||||
if (ret) {
|
||||
sock_release(sock);
|
||||
goto out;
|
||||
@@ -1083,7 +1182,7 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = sock_opts_and_names(conn, sock);
|
||||
ret = sock_opts_and_names(sb, conn, sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1134,9 +1233,11 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
|
||||
struct scoutfs_net_connection *listener;
|
||||
struct scoutfs_net_connection *acc_conn;
|
||||
scoutfs_net_response_t resp_func;
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
unsigned long delay;
|
||||
void *resp_data;
|
||||
|
||||
trace_scoutfs_net_shutdown_work_enter(sb, 0, 0);
|
||||
trace_scoutfs_conn_shutdown_start(conn);
|
||||
@@ -1182,6 +1283,30 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
/* and wait for accepted conn shutdown work to finish */
|
||||
wait_event(conn->waitq, empty_accepted_list(conn));
|
||||
|
||||
/*
|
||||
* Forced unmount will cause net submit to fail once it's
|
||||
* started and it calls shutdown to interrupt any previous
|
||||
* senders waiting for a response. The response callbacks can
|
||||
* do quite a lot of work so we're careful to call them outside
|
||||
* the lock.
|
||||
*/
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
spin_lock(&conn->lock);
|
||||
list_splice_tail_init(&conn->send_queue, &conn->resend_queue);
|
||||
while ((msend = list_first_entry_or_null(&conn->resend_queue,
|
||||
struct message_send, head))) {
|
||||
resp_func = msend->resp_func;
|
||||
resp_data = msend->resp_data;
|
||||
free_msend(ninf, msend);
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
}
|
||||
spin_unlock(&conn->lock);
|
||||
}
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
/* greetings aren't resent across sockets */
|
||||
@@ -1274,7 +1399,7 @@ restart:
|
||||
if (ret) {
|
||||
scoutfs_err(sb, "client fence returned err %d, shutting down server",
|
||||
ret);
|
||||
scoutfs_server_abort(sb);
|
||||
scoutfs_server_stop(sb);
|
||||
}
|
||||
}
|
||||
destroy_conn(acc);
|
||||
@@ -1318,23 +1443,30 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
{
|
||||
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
|
||||
struct scoutfs_net_connection *conn;
|
||||
unsigned int nr;
|
||||
unsigned int i;
|
||||
|
||||
nr = min_t(unsigned int, num_possible_cpus(),
|
||||
PAGE_SIZE / sizeof(struct scoutfs_work_list));
|
||||
|
||||
conn = kzalloc(sizeof(struct scoutfs_net_connection), GFP_NOFS);
|
||||
if (!conn)
|
||||
return NULL;
|
||||
|
||||
conn->info = kzalloc(info_size, GFP_NOFS);
|
||||
if (!conn->info) {
|
||||
kfree(conn);
|
||||
return NULL;
|
||||
if (conn) {
|
||||
if (info_size)
|
||||
conn->info = kzalloc(info_size, GFP_NOFS);
|
||||
conn->ordered_proc_wlists = kmalloc_array(nr, sizeof(struct scoutfs_work_list),
|
||||
GFP_NOFS);
|
||||
conn->workq = alloc_workqueue("scoutfs_net_%s",
|
||||
WQ_UNBOUND | WQ_NON_REENTRANT, 0,
|
||||
name_suffix);
|
||||
}
|
||||
|
||||
conn->workq = alloc_workqueue("scoutfs_net_%s",
|
||||
WQ_UNBOUND | WQ_NON_REENTRANT, 0,
|
||||
name_suffix);
|
||||
if (!conn->workq) {
|
||||
kfree(conn->info);
|
||||
kfree(conn);
|
||||
if (!conn || (info_size && !conn->info) || !conn->workq || !conn->ordered_proc_wlists) {
|
||||
if (conn) {
|
||||
kfree(conn->info);
|
||||
kfree(conn->ordered_proc_wlists);
|
||||
if (conn->workq)
|
||||
destroy_workqueue(conn->workq);
|
||||
kfree(conn);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1364,6 +1496,13 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
INIT_DELAYED_WORK(&conn->reconn_free_dwork,
|
||||
scoutfs_net_reconn_free_worker);
|
||||
|
||||
conn->ordered_proc_nr = nr;
|
||||
for (i = 0; i < nr; i++) {
|
||||
INIT_WORK(&conn->ordered_proc_wlists[i].work, scoutfs_net_ordered_proc_worker);
|
||||
spin_lock_init(&conn->ordered_proc_wlists[i].lock);
|
||||
INIT_LIST_HEAD(&conn->ordered_proc_wlists[i].list);
|
||||
}
|
||||
|
||||
scoutfs_tseq_add(&ninf->conn_tseq_tree, &conn->tseq_entry);
|
||||
trace_scoutfs_conn_alloc(conn);
|
||||
|
||||
@@ -1428,13 +1567,15 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(conn->sock))
|
||||
return -EINVAL;
|
||||
|
||||
ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1444,20 +1585,18 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
goto out;
|
||||
|
||||
ret = kernel_listen(sock, 255);
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getsockname(sock, (struct sockaddr *)&conn->sockname,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
if (ret)
|
||||
ret = kc_kernel_getsockname(sock, (struct sockaddr *)&conn->sockname);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
|
||||
conn->sock = sock;
|
||||
*sin = conn->sockname;
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
if (ret < 0 && sock)
|
||||
sock_release(sock);
|
||||
@@ -1754,23 +1893,6 @@ int scoutfs_net_response_node(struct super_block *sb,
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* The response function that was submitted with the request is not
|
||||
* called if the request is canceled here.
|
||||
*/
|
||||
void scoutfs_net_cancel_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id)
|
||||
{
|
||||
struct message_send *msend;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
msend = find_request(conn, cmd, id);
|
||||
if (msend)
|
||||
complete_send(conn, msend);
|
||||
spin_unlock(&conn->lock);
|
||||
}
|
||||
|
||||
struct sync_request_completion {
|
||||
struct completion comp;
|
||||
void *resp;
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
#ifndef _SCOUTFS_NET_H_
|
||||
#define _SCOUTFS_NET_H_
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/in.h>
|
||||
#include "endian_swap.h"
|
||||
#include "tseq.h"
|
||||
|
||||
struct scoutfs_work_list {
|
||||
struct work_struct work;
|
||||
spinlock_t lock;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct scoutfs_net_connection;
|
||||
|
||||
/* These are called in their own blocking context */
|
||||
@@ -61,6 +69,8 @@ struct scoutfs_net_connection {
|
||||
struct list_head resend_queue;
|
||||
|
||||
atomic64_t recv_seq;
|
||||
unsigned int ordered_proc_nr;
|
||||
struct scoutfs_work_list *ordered_proc_wlists;
|
||||
|
||||
struct workqueue_struct *workq;
|
||||
struct work_struct listen_work;
|
||||
@@ -134,9 +144,6 @@ int scoutfs_net_submit_request_node(struct super_block *sb,
|
||||
u64 rid, u8 cmd, void *arg, u16 arg_len,
|
||||
scoutfs_net_response_t resp_func,
|
||||
void *resp_data, u64 *id_ret);
|
||||
void scoutfs_net_cancel_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id);
|
||||
int scoutfs_net_sync_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, void *arg, unsigned arg_len,
|
||||
|
||||
313
kmod/src/omap.c
@@ -30,27 +30,22 @@
|
||||
/*
* As a client removes an inode from its cache with an nlink of 0 it
* needs to decide if it is the last client using the inode and should
* fully delete all its items. It needs to know if other mounts still
* have the inode in use.
* fully delete all the inode's items. It needs to know if other mounts
* still have the inode in use.
*
* We need a way to communicate between mounts that an inode is open.
* We need a way to communicate between mounts that an inode is in use.
* We don't want to pay the synchronous per-file locking round trip
* costs associated with per-inode open locks that you'd typically see
* in systems to solve this problem.
* in systems to solve this problem. The first prototypes of this
* tracked open file handles so this was coined the open map, though it
* now tracks cached inodes.
*
* Instead clients maintain open bitmaps that cover groups of inodes.
* As inodes enter the cache their bit is set, and as the inode is
* evicted the bit is cleared. As an inode is evicted messages are sent
* around the cluster to get the current bitmaps for that inode's group
* from all active mounts. If the inode's bit is clear then it can be
* deleted.
*
* We associate the open bitmaps with our cluster locking of inode
* groups to cache these open bitmaps. As long as we have the lock then
* nlink can't be changed on any remote mounts. Specifically, it can't
* increase from 0 so any clear bits can gain references on remote
* mounts. As long as we have the lock, all clear bits in the group for
* inodes with 0 nlink can be deleted.
* Clients maintain bitmaps that cover groups of inodes. As inodes
* enter the cache their bit is set and as the inode is evicted the bit
* is cleared. As deletion is attempted, either by scanning orphans or
* evicting an inode with an nlink of 0, messages are sent around the
* cluster to get the current bitmaps for that inode's group from all
* active mounts. If the inode's bit is clear then it can be deleted.
*
* This layer maintains a list of client rids to send messages to. The
* server calls us as clients enter and leave the cluster. We can't
@@ -85,14 +80,12 @@ struct omap_info {
|
||||
struct omap_info *name = SCOUTFS_SB(sb)->omap_info
|
||||
|
||||
/*
|
||||
* The presence of an inode in the inode cache increases the count of
|
||||
* its inode number's position within its lock group. These structs
|
||||
* track the counts for all the inodes in a lock group and maintain a
|
||||
* bitmap whose bits are set for each non-zero count.
|
||||
* The presence of an inode in the inode cache sets its bit in the lock
* group's bitmap.
|
||||
*
|
||||
* We don't want to add additional global synchronization of inode cache
|
||||
* maintenance so these are tracked in an rcu hash table. Once their
|
||||
* total count reaches zero they're removed from the hash and queued for
|
||||
* total reaches zero they're removed from the hash and queued for
|
||||
* freeing and readers should ignore them.
|
||||
*/
|
||||
struct omap_group {
|
||||
@@ -102,7 +95,6 @@ struct omap_group {
|
||||
u64 nr;
|
||||
spinlock_t lock;
|
||||
unsigned int total;
|
||||
unsigned int *counts;
|
||||
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
|
||||
};
|
||||
|
||||
@@ -111,8 +103,7 @@ do { \
|
||||
__typeof__(group) _grp = (group); \
|
||||
__typeof__(bit_nr) _nr = (bit_nr); \
|
||||
\
|
||||
trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr, \
|
||||
_nr < 0 ? -1 : _grp->counts[_nr]); \
|
||||
trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
@@ -134,18 +125,6 @@ struct omap_request {
|
||||
struct scoutfs_open_ino_map map;
|
||||
};
|
||||
|
||||
/*
|
||||
* In each inode group cluster lock we store data to track the open ino
|
||||
* map which tracks all the inodes that the cluster lock covers. When
|
||||
* the seq shows that the map is stale we send a request to update it.
|
||||
*/
|
||||
struct scoutfs_omap_lock_data {
|
||||
u64 seq;
|
||||
bool req_in_flight;
|
||||
wait_queue_head_t waitq;
|
||||
struct scoutfs_open_ino_map map;
|
||||
};
|
||||
|
||||
static inline void init_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
INIT_LIST_HEAD(&list->head);
|
||||
@@ -178,6 +157,15 @@ static int free_rid(struct omap_rid_list *list, struct omap_rid_entry *entry)
|
||||
return nr;
|
||||
}
|
||||
|
||||
static void free_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &list->head, head)
|
||||
free_rid(list, entry);
|
||||
}
|
||||
|
||||
static int copy_rids(struct omap_rid_list *to, struct omap_rid_list *from, spinlock_t *from_lock)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
@@ -232,7 +220,7 @@ static void free_rids(struct omap_rid_list *list)
|
||||
}
|
||||
}
|
||||
|
||||
static void calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
{
	*group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
	*bit_nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;
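
The group/bit split is just the shift/mask shown above. A small worked example follows, using a hypothetical SCOUTFS_OPEN_INO_MAP_SHIFT of 10 (1024 inodes per group); the real constants live in the scoutfs format headers and are not shown in this diff.

/*
 * Worked example: which bitmap group and which bit within it an inode
 * number maps to.  MAP_SHIFT/MAP_MASK are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

#define MAP_SHIFT 10
#define MAP_MASK ((1ULL << MAP_SHIFT) - 1)

int main(void)
{
	uint64_t ino = 123456;
	uint64_t group_nr = ino >> MAP_SHIFT;	/* 120: which bitmap group */
	int bit_nr = (int)(ino & MAP_MASK);	/* 576: bit within the group */

	printf("ino %llu -> group %llu, bit %d\n",
	       (unsigned long long)ino, (unsigned long long)group_nr, bit_nr);
	return 0;
}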
@@ -242,21 +230,13 @@ static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
|
||||
{
|
||||
struct omap_group *group;
|
||||
|
||||
BUILD_BUG_ON((sizeof(group->counts[0]) * SCOUTFS_OPEN_INO_MAP_BITS) > PAGE_SIZE);
|
||||
|
||||
group = kzalloc(sizeof(struct omap_group), GFP_NOFS);
|
||||
if (group) {
|
||||
group->sb = sb;
|
||||
group->nr = group_nr;
|
||||
spin_lock_init(&group->lock);
|
||||
|
||||
group->counts = (void *)get_zeroed_page(GFP_NOFS);
|
||||
if (!group->counts) {
|
||||
kfree(group);
|
||||
group = NULL;
|
||||
} else {
|
||||
trace_group(sb, alloc, group, -1);
|
||||
}
|
||||
trace_group(sb, alloc, group, -1);
|
||||
}
|
||||
|
||||
return group;
|
||||
@@ -265,7 +245,6 @@ static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
|
||||
static void free_group(struct super_block *sb, struct omap_group *group)
|
||||
{
|
||||
trace_group(sb, free, group, -1);
|
||||
free_page((unsigned long)group->counts);
|
||||
kfree(group);
|
||||
}
|
||||
|
||||
@@ -283,13 +262,16 @@ static const struct rhashtable_params group_ht_params = {
|
||||
};
|
||||
|
||||
/*
|
||||
* Track an cached inode in its group. Our increment can be racing with
|
||||
* a final decrement that removes the group from the hash, sets total to
|
||||
* Track a cached inode in its group. Our set can be racing with a
* final clear that removes the group from the hash, sets total to
|
||||
* UINT_MAX, and calls rcu free. We can retry until the dead group is
|
||||
* no longer visible in the hash table and we can insert a new allocated
|
||||
* group.
|
||||
*
|
||||
* The caller must ensure that the bit is clear, -EEXIST will be
|
||||
* returned otherwise.
|
||||
*/
|
||||
int scoutfs_omap_inc(struct super_block *sb, u64 ino)
|
||||
int scoutfs_omap_set(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
@@ -298,7 +280,7 @@ int scoutfs_omap_inc(struct super_block *sb, u64 ino)
|
||||
bool found;
|
||||
int ret = 0;
|
||||
|
||||
calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
retry:
|
||||
found = false;
|
||||
@@ -308,10 +290,10 @@ retry:
|
||||
spin_lock(&group->lock);
|
||||
if (group->total < UINT_MAX) {
|
||||
found = true;
|
||||
if (group->counts[bit_nr]++ == 0) {
|
||||
set_bit_le(bit_nr, group->bits);
|
||||
if (WARN_ON_ONCE(test_and_set_bit_le(bit_nr, group->bits)))
|
||||
ret = -EEXIST;
|
||||
else
|
||||
group->total++;
|
||||
}
|
||||
}
|
||||
trace_group(sb, inc, group, bit_nr);
|
||||
spin_unlock(&group->lock);
|
||||
@@ -342,29 +324,50 @@ retry:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool scoutfs_omap_test(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
bool ret = false;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
ret = !!test_bit_le(bit_nr, group->bits);
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decrement a previously incremented ino count. Not finding a count
|
||||
* implies imbalanced inc/dec or bugs freeing groups. We only free
|
||||
* groups here as the last dec drops the group's total count to 0.
|
||||
* Clear a previously set ino bit. Trying to clear a bit that's already
|
||||
* clear implies imbalanced set/clear or bugs freeing groups. We only
|
||||
* free groups here as the last clear drops the group's total to 0.
|
||||
*/
|
||||
void scoutfs_omap_dec(struct super_block *sb, u64 ino)
|
||||
void scoutfs_omap_clear(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
|
||||
calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
WARN_ON_ONCE(group->counts[bit_nr] == 0);
|
||||
WARN_ON_ONCE(!test_bit_le(bit_nr, group->bits));
|
||||
WARN_ON_ONCE(group->total == 0);
|
||||
WARN_ON_ONCE(group->total == UINT_MAX);
|
||||
if (--group->counts[bit_nr] == 0) {
|
||||
clear_bit_le(bit_nr, group->bits);
|
||||
if (test_and_clear_bit_le(bit_nr, group->bits)) {
|
||||
if (--group->total == 0) {
|
||||
group->total = UINT_MAX;
|
||||
rhashtable_remove_fast(&ominf->group_ht, &group->ht_head,
|
||||
@@ -664,8 +667,7 @@ int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
|
||||
|
||||
/*
|
||||
* The client is receiving a request from the server for its map for the
|
||||
* given group. Look up the group and copy the bits to the map for
|
||||
* non-zero open counts.
|
||||
* given group. Look up the group and copy the bits to the map.
|
||||
*
|
||||
* The mount originating the request for this bitmap has the inode group
|
||||
* write locked. We can't be adding links to any inodes in the group
|
||||
@@ -811,182 +813,13 @@ void scoutfs_omap_server_shutdown(struct super_block *sb)
|
||||
llist_for_each_entry_safe(req, tmp, requests, llnode)
|
||||
kfree(req);
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
free_rid_list(&ominf->rids);
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
static bool omap_req_in_flight(struct scoutfs_lock *lock, struct scoutfs_omap_lock_data *ldata)
|
||||
{
|
||||
bool in_flight;
|
||||
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
in_flight = ldata->req_in_flight;
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
|
||||
return in_flight;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure the map covered by the cluster lock is current. The caller
|
||||
* holds the cluster lock so once we store lock_data on the cluster lock
|
||||
* it won't be freed and the write_seq in the cluster lock won't change.
|
||||
*
|
||||
* The omap_spinlock protects the omap_data in the cluster lock. We
|
||||
* have to drop it if we have to block to allocate lock_data, send a
|
||||
* request for a new map, or wait for a request in flight to finish.
|
||||
*/
|
||||
static int get_current_lock_data(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
struct scoutfs_omap_lock_data **ldata_ret, u64 group_nr)
|
||||
{
|
||||
struct scoutfs_omap_lock_data *ldata;
|
||||
bool send_req;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
ldata = lock->omap_data;
|
||||
if (ldata == NULL) {
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
ldata = kzalloc(sizeof(struct scoutfs_omap_lock_data), GFP_NOFS);
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
if (!ldata) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (lock->omap_data == NULL) {
|
||||
ldata->seq = lock->write_seq - 1; /* ensure refresh */
|
||||
init_waitqueue_head(&ldata->waitq);
|
||||
|
||||
lock->omap_data = ldata;
|
||||
} else {
|
||||
kfree(ldata);
|
||||
ldata = lock->omap_data;
|
||||
}
|
||||
}
|
||||
|
||||
while (ldata->seq != lock->write_seq) {
|
||||
/* only one waiter sends a request at a time */
|
||||
if (!ldata->req_in_flight) {
|
||||
ldata->req_in_flight = true;
|
||||
send_req = true;
|
||||
} else {
|
||||
send_req = false;
|
||||
}
|
||||
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
if (send_req)
|
||||
ret = scoutfs_client_open_ino_map(sb, group_nr, &ldata->map);
|
||||
else
|
||||
wait_event(ldata->waitq, !omap_req_in_flight(lock, ldata));
|
||||
spin_lock(&lock->omap_spinlock);
|
||||
|
||||
/* only sender can return error, other waiters retry */
|
||||
if (send_req) {
|
||||
ldata->req_in_flight = false;
|
||||
if (ret == 0)
|
||||
ldata->seq = lock->write_seq;
|
||||
wake_up(&ldata->waitq);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock(&lock->omap_spinlock);
|
||||
|
||||
if (ret == 0)
|
||||
*ldata_ret = ldata;
|
||||
else
|
||||
*ldata_ret = NULL;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return 1 and give the caller their locks when they should delete the
|
||||
* inode items. It's safe to delete the inode items when it is no
|
||||
* longer reachable and nothing is referencing it.
|
||||
*
|
||||
* The inode is unreachable when nlink hits zero. Cluster locks protect
|
||||
* modification and testing of nlink. We use the ino_lock_cov coverage
|
||||
* to short circuit the common case of having a locked inode that hasn't
|
||||
* been deleted. If it isn't locked, we have to acquire the lock to
|
||||
* refresh the inode to see its current nlink.
|
||||
*
|
||||
* Then we use an open inode bitmap that covers all the inodes in the
|
||||
* lock group to determine if the inode is present in any other mount's
|
||||
* caches. We refresh it by asking the server for all clients' maps and
|
||||
* then store it in the lock. As long as we hold the lock nothing can
|
||||
* increase nlink from zero and let people get a reference to the inode.
|
||||
*/
|
||||
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
|
||||
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret)
|
||||
{
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct scoutfs_lock *orph_lock = NULL;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
const u64 ino = scoutfs_ino(inode);
|
||||
struct scoutfs_omap_lock_data *ldata;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
/* lock group and omap constants are defined independently */
|
||||
BUILD_BUG_ON(SCOUTFS_OPEN_INO_MAP_BITS != SCOUTFS_LOCK_INODE_GROUP_NR);
|
||||
|
||||
if (scoutfs_lock_is_covered(sb, &si->ino_lock_cov) && inode->i_nlink > 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (inode->i_nlink > 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
/* only one request to refresh the map at a time */
|
||||
ret = get_current_lock_data(sb, lock, &ldata, group_nr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* can delete caller's zero nlink inode if it's not cached in other mounts */
|
||||
ret = !test_bit_le(bit_nr, ldata->map.bits);
|
||||
out:
|
||||
trace_scoutfs_omap_should_delete(sb, ino, inode->i_nlink, ret);
|
||||
|
||||
if (ret > 0) {
|
||||
err = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, ino, &orph_lock);
|
||||
if (err < 0)
|
||||
ret = err;
|
||||
}
|
||||
|
||||
if (ret <= 0) {
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
|
||||
lock = NULL;
|
||||
}
|
||||
|
||||
*lock_ret = lock;
|
||||
*orph_lock_ret = orph_lock;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata)
|
||||
{
|
||||
if (ldata) {
|
||||
WARN_ON_ONCE(ldata->req_in_flight);
|
||||
WARN_ON_ONCE(waitqueue_active(&ldata->waitq));
|
||||
kfree(ldata);
|
||||
}
|
||||
}
|
||||
|
||||
int scoutfs_omap_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -1044,6 +877,10 @@ void scoutfs_omap_destroy(struct super_block *sb)
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
free_rid_list(&ominf->rids);
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
rhashtable_destroy(&ominf->group_ht);
|
||||
rhashtable_destroy(&ominf->req_ht);
|
||||
kfree(ominf);
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
#ifndef _SCOUTFS_OMAP_H_
|
||||
#define _SCOUTFS_OMAP_H_
|
||||
|
||||
int scoutfs_omap_inc(struct super_block *sb, u64 ino);
|
||||
void scoutfs_omap_dec(struct super_block *sb, u64 ino);
|
||||
int scoutfs_omap_should_delete(struct super_block *sb, struct inode *inode,
|
||||
struct scoutfs_lock **lock_ret, struct scoutfs_lock **orph_lock_ret);
|
||||
void scoutfs_omap_free_lock_data(struct scoutfs_omap_lock_data *ldata);
|
||||
int scoutfs_omap_set(struct super_block *sb, u64 ino);
|
||||
bool scoutfs_omap_test(struct super_block *sb, u64 ino);
|
||||
void scoutfs_omap_clear(struct super_block *sb, u64 ino);
|
||||
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args);
|
||||
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr);
|
||||
|
||||
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
|
||||
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);
|
||||
|
||||
@@ -26,22 +26,45 @@
|
||||
#include "msg.h"
|
||||
#include "options.h"
|
||||
#include "super.h"
|
||||
#include "inode.h"
|
||||
#include "alloc.h"
|
||||
|
||||
enum {
|
||||
Opt_acl,
|
||||
Opt_data_prealloc_blocks,
|
||||
Opt_data_prealloc_contig_only,
|
||||
Opt_log_merge_wait_timeout_ms,
|
||||
Opt_metadev_path,
|
||||
Opt_noacl,
|
||||
Opt_orphan_scan_delay_ms,
|
||||
Opt_quorum_heartbeat_timeout_ms,
|
||||
Opt_quorum_slot_nr,
|
||||
Opt_tcp_keepalive_timeout_ms,
|
||||
Opt_err,
|
||||
};
|
||||
|
||||
static const match_table_t tokens = {
|
||||
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
|
||||
{Opt_acl, "acl"},
|
||||
{Opt_data_prealloc_blocks, "data_prealloc_blocks=%s"},
|
||||
{Opt_data_prealloc_contig_only, "data_prealloc_contig_only=%s"},
|
||||
{Opt_log_merge_wait_timeout_ms, "log_merge_wait_timeout_ms=%s"},
|
||||
{Opt_metadev_path, "metadev_path=%s"},
|
||||
{Opt_noacl, "noacl"},
|
||||
{Opt_orphan_scan_delay_ms, "orphan_scan_delay_ms=%s"},
|
||||
{Opt_quorum_heartbeat_timeout_ms, "quorum_heartbeat_timeout_ms=%s"},
|
||||
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
|
||||
{Opt_tcp_keepalive_timeout_ms, "tcp_keepalive_timeout_ms=%s"},
|
||||
{Opt_err, NULL}
|
||||
};
|
||||
|
||||
struct options_sb_info {
|
||||
struct dentry *debugfs_dir;
|
||||
struct options_info {
|
||||
seqlock_t seqlock;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct scoutfs_sysfs_attrs sysfs_attrs;
|
||||
};
|
||||
|
||||
u32 scoutfs_option_u32(struct super_block *sb, int token)
|
||||
{
|
||||
WARN_ON_ONCE(1);
|
||||
return 0;
|
||||
}
|
||||
#define DECLARE_OPTIONS_INFO(sb, name) \
|
||||
struct options_info *name = SCOUTFS_SB(sb)->options_info
|
||||
|
||||
static int parse_bdev_path(struct super_block *sb, substring_t *substr,
|
||||
char **bdev_path_ret)
|
||||
@@ -89,58 +112,211 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
struct mount_options *parsed)
|
||||
static void free_options(struct scoutfs_mount_options *opts)
|
||||
{
|
||||
kfree(opts->metadev_path);
|
||||
}
|
||||
|
||||
#define MIN_LOG_MERGE_WAIT_TIMEOUT_MS 100UL
|
||||
#define DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS 500
|
||||
#define MAX_LOG_MERGE_WAIT_TIMEOUT_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
#define MIN_ORPHAN_SCAN_DELAY_MS 100UL
|
||||
#define DEFAULT_ORPHAN_SCAN_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
#define MAX_ORPHAN_SCAN_DELAY_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
#define MIN_DATA_PREALLOC_BLOCKS 1ULL
|
||||
#define MAX_DATA_PREALLOC_BLOCKS ((unsigned long long)SCOUTFS_BLOCK_SM_MAX)
|
||||
|
||||
#define DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS (10 * MSEC_PER_SEC)
|
||||
|
||||
static void init_default_options(struct scoutfs_mount_options *opts)
|
||||
{
|
||||
memset(opts, 0, sizeof(*opts));
|
||||
|
||||
opts->data_prealloc_blocks = SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS;
|
||||
opts->data_prealloc_contig_only = 1;
|
||||
opts->log_merge_wait_timeout_ms = DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS;
|
||||
opts->orphan_scan_delay_ms = -1;
|
||||
opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
|
||||
opts->quorum_slot_nr = -1;
|
||||
opts->tcp_keepalive_timeout_ms = DEFAULT_TCP_KEEPALIVE_TIMEOUT_MS;
|
||||
}
|
||||
|
||||
static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse log_merge_wait_timeout_ms value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val < MIN_LOG_MERGE_WAIT_TIMEOUT_MS || val > MAX_LOG_MERGE_WAIT_TIMEOUT_MS) {
|
||||
scoutfs_err(sb, "invalid log_merge_wait_timeout_ms value %d, must be between %lu and %lu",
|
||||
val, MIN_LOG_MERGE_WAIT_TIMEOUT_MS, MAX_LOG_MERGE_WAIT_TIMEOUT_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int verify_quorum_heartbeat_timeout_ms(struct super_block *sb, int ret, u64 val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse quorum_heartbeat_timeout_ms value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val < SCOUTFS_QUORUM_MIN_HB_TIMEO_MS || val > SCOUTFS_QUORUM_MAX_HB_TIMEO_MS) {
|
||||
scoutfs_err(sb, "invalid quorum_heartbeat_timeout_ms value %llu, must be between %lu and %lu",
|
||||
val, SCOUTFS_QUORUM_MIN_HB_TIMEO_MS, SCOUTFS_QUORUM_MAX_HB_TIMEO_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int verify_tcp_keepalive_timeout_ms(struct super_block *sb, int ret, int val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse tcp_keepalive_timeout_ms value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val <= (UNRESPONSIVE_PROBES * MSEC_PER_SEC)) {
|
||||
scoutfs_err(sb, "invalid tcp_keepalive_timeout_ms value %d, must be larger than %lu",
|
||||
val, (UNRESPONSIVE_PROBES * MSEC_PER_SEC));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse the option string into our options struct. This can allocate
|
||||
* memory in the struct. The caller is responsible for always calling
|
||||
* free_options() when the struct is destroyed, including when we return
|
||||
* an error.
|
||||
*/
|
||||
static int parse_options(struct super_block *sb, char *options, struct scoutfs_mount_options *opts)
|
||||
{
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
u64 nr64;
|
||||
int nr;
|
||||
int token;
|
||||
char *p;
|
||||
int ret;
|
||||
|
||||
/* Set defaults */
|
||||
memset(parsed, 0, sizeof(*parsed));
|
||||
parsed->quorum_slot_nr = -1;
|
||||
|
||||
while ((p = strsep(&options, ",")) != NULL) {
|
||||
if (!*p)
|
||||
continue;
|
||||
|
||||
token = match_token(p, tokens, args);
|
||||
switch (token) {
|
||||
case Opt_quorum_slot_nr:
|
||||
|
||||
if (parsed->quorum_slot_nr != -1) {
|
||||
case Opt_acl:
|
||||
sb->s_flags |= SB_POSIXACL;
|
||||
break;
|
||||
|
||||
case Opt_data_prealloc_blocks:
|
||||
ret = match_u64(args, &nr64);
|
||||
if (ret < 0 ||
|
||||
nr64 < MIN_DATA_PREALLOC_BLOCKS || nr64 > MAX_DATA_PREALLOC_BLOCKS) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
|
||||
MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->data_prealloc_blocks = nr64;
|
||||
break;
|
||||
|
||||
case Opt_data_prealloc_contig_only:
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 || nr > 1) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must only be 0 or 1");
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->data_prealloc_contig_only = nr;
|
||||
break;
|
||||
|
||||
case Opt_tcp_keepalive_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_tcp_keepalive_timeout_ms(sb, ret, nr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->tcp_keepalive_timeout_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_log_merge_wait_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->log_merge_wait_timeout_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_metadev_path:
|
||||
ret = parse_bdev_path(sb, &args[0], &opts->metadev_path);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case Opt_noacl:
|
||||
sb->s_flags &= ~SB_POSIXACL;
|
||||
break;
|
||||
|
||||
case Opt_orphan_scan_delay_ms:
|
||||
if (opts->orphan_scan_delay_ms != -1) {
|
||||
scoutfs_err(sb, "multiple orphan_scan_delay_ms options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 ||
|
||||
nr < MIN_ORPHAN_SCAN_DELAY_MS || nr > MAX_ORPHAN_SCAN_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid orphan_scan_delay_ms option, must be between %lu and %lu",
|
||||
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->orphan_scan_delay_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_quorum_heartbeat_timeout_ms:
|
||||
ret = match_u64(args, &nr64);
|
||||
ret = verify_quorum_heartbeat_timeout_ms(sb, ret, nr64);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->quorum_heartbeat_timeout_ms = nr64;
|
||||
break;
|
||||
|
||||
case Opt_quorum_slot_nr:
|
||||
if (opts->quorum_slot_nr != -1) {
|
||||
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 ||
|
||||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
if (ret < 0 || nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
|
||||
SCOUTFS_QUORUM_MAX_SLOTS - 1);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
parsed->quorum_slot_nr = nr;
|
||||
opts->quorum_slot_nr = nr;
|
||||
break;
|
||||
case Opt_metadev_path:
|
||||
|
||||
ret = parse_bdev_path(sb, &args[0],
|
||||
&parsed->metadev_path);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
default:
|
||||
scoutfs_err(sb, "Unknown or malformed option, \"%s\"",
|
||||
p);
|
||||
break;
|
||||
scoutfs_err(sb, "Unknown or malformed option, \"%s\"", p);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!parsed->metadev_path) {
|
||||
if (opts->orphan_scan_delay_ms == -1)
|
||||
opts->orphan_scan_delay_ms = DEFAULT_ORPHAN_SCAN_DELAY_MS;
|
||||
|
||||
if (!opts->metadev_path) {
|
||||
scoutfs_err(sb, "Required mount option \"metadev_path\" not found");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -148,40 +324,344 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
return 0;
|
||||
}
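
The comment above parse_options() makes the caller responsible for calling free_options() even when parsing fails part way through, because allocations (like metadev_path) may already have happened. A minimal userspace sketch of that calling convention follows; example_parse() and example_free() are hypothetical names, not the kernel functions.

    /* Sketch of the "caller always frees, even on error" convention; hypothetical names. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct example_opts {
            char *metadev_path;     /* may already be allocated when parsing later fails */
    };

    static int example_parse(char *options, struct example_opts *opts)
    {
            char *p;

            while ((p = strsep(&options, ",")) != NULL) {
                    if (!*p)
                            continue;
                    if (!strncmp(p, "metadev_path=", 13)) {
                            opts->metadev_path = strdup(p + 13);
                            if (!opts->metadev_path)
                                    return -1;
                    } else {
                            return -1;      /* unknown option; earlier allocation is left for the caller */
                    }
            }
            return opts->metadev_path ? 0 : -1;
    }

    static void example_free(struct example_opts *opts)
    {
            free(opts->metadev_path);
    }

    int main(void)
    {
            char str[] = "metadev_path=/dev/meta,bogus_option";
            struct example_opts opts = { 0 };

            if (example_parse(str, &opts) < 0)
                    fprintf(stderr, "parse failed\n");
            example_free(&opts);    /* runs on both the success and error paths */
            return 0;
    }
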
|
||||
|
||||
int scoutfs_options_setup(struct super_block *sb)
|
||||
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
unsigned int seq;
|
||||
|
||||
if (WARN_ON_ONCE(optinf == NULL)) {
|
||||
/* trying to use options before early setup or after destroy */
|
||||
init_default_options(opts);
|
||||
return;
|
||||
}
|
||||
|
||||
do {
|
||||
seq = read_seqbegin(&optinf->seqlock);
|
||||
memcpy(opts, &optinf->opts, sizeof(struct scoutfs_mount_options));
|
||||
} while (read_seqretry(&optinf->seqlock, seq));
|
||||
}
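
scoutfs_options_read() above is the classic seqlock read side: copy the options out, then retry if a writer raced with the copy. A rough userspace approximation of the retry pattern is sketched below using a plain C11 atomic sequence counter; it is not the kernel seqlock API, and a real seqlock also adds the memory barriers that make the unlocked data copy safe under concurrency.

    /* Userspace approximation of the seqlock read/retry pattern; not the kernel API. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct seq_opts {
            atomic_uint seq;                        /* even: stable, odd: writer active */
            unsigned int orphan_scan_delay_ms;
    };

    static unsigned int seq_opts_read(struct seq_opts *s)
    {
            unsigned int seq, val;

            do {
                    while ((seq = atomic_load(&s->seq)) & 1)
                            ;                       /* writer in progress, wait */
                    val = s->orphan_scan_delay_ms;  /* copy the snapshot */
            } while (atomic_load(&s->seq) != seq);  /* a write raced in, retry */

            return val;
    }

    static void seq_opts_write(struct seq_opts *s, unsigned int val)
    {
            atomic_fetch_add(&s->seq, 1);           /* now odd: readers will retry */
            s->orphan_scan_delay_ms = val;
            atomic_fetch_add(&s->seq, 1);           /* even again: snapshot is stable */
    }

    int main(void)
    {
            struct seq_opts s = { .seq = 0, .orphan_scan_delay_ms = 10000 };

            seq_opts_write(&s, 500);
            printf("delay %u ms\n", seq_opts_read(&s));
            return 0;
    }
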
|
||||
|
||||
/*
|
||||
* Early setup that parses and stores the options so that the rest of
|
||||
* setup can use them. Full options setup that relies on other
|
||||
* components will be done later.
|
||||
*/
|
||||
int scoutfs_options_early_setup(struct super_block *sb, char *options)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct options_sb_info *osi;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct options_info *optinf;
|
||||
int ret;
|
||||
|
||||
osi = kzalloc(sizeof(struct options_sb_info), GFP_KERNEL);
|
||||
if (!osi)
|
||||
return -ENOMEM;
|
||||
init_default_options(&opts);
|
||||
|
||||
sbi->options = osi;
|
||||
ret = parse_options(sb, options, &opts);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
osi->debugfs_dir = debugfs_create_dir("options", sbi->debug_root);
|
||||
if (!osi->debugfs_dir) {
|
||||
optinf = kzalloc(sizeof(struct options_info), GFP_KERNEL);
|
||||
if (!optinf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
seqlock_init(&optinf->seqlock);
|
||||
scoutfs_sysfs_init_attrs(sb, &optinf->sysfs_attrs);
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts = opts;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
sbi->options_info = optinf;
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
free_options(&opts);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
struct super_block *sb = root->d_sb;
|
||||
struct scoutfs_mount_options opts;
|
||||
const bool is_acl = !!(sb->s_flags & SB_POSIXACL);
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
if (is_acl)
|
||||
seq_puts(seq, ",acl");
|
||||
seq_printf(seq, ",data_prealloc_blocks=%llu", opts.data_prealloc_blocks);
|
||||
seq_printf(seq, ",data_prealloc_contig_only=%u", opts.data_prealloc_contig_only);
|
||||
seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
|
||||
if (!is_acl)
|
||||
seq_puts(seq, ",noacl");
|
||||
seq_printf(seq, ",orphan_scan_delay_ms=%u", opts.orphan_scan_delay_ms);
|
||||
if (opts.quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts.quorum_slot_nr);
|
||||
seq_printf(seq, ",tcp_keepalive_timeout_ms=%d", opts.tcp_keepalive_timeout_ms);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t data_prealloc_blocks_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", opts.data_prealloc_blocks);
|
||||
}
|
||||
static ssize_t data_prealloc_blocks_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_DATA_PREALLOC_BLOCKS || val > MAX_DATA_PREALLOC_BLOCKS) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
|
||||
MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.data_prealloc_blocks = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(data_prealloc_blocks);
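
The store handlers above copy the sysfs buffer into a small local array and null-terminate it before handing it to kstrto*(), since the incoming buffer is not guaranteed to be a properly terminated C string. The same bounded-copy pattern in userspace terms, with strtoull standing in for kstrtoll, might look like this sketch.

    /* Bounded copy + null-terminate before numeric parsing; userspace sketch. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    static int parse_u64_bounded(const char *buf, size_t count, unsigned long long *val_ret)
    {
            char nullterm[30];      /* more than enough digits for a u64 */
            char *end;
            size_t len;

            len = count < sizeof(nullterm) - 1 ? count : sizeof(nullterm) - 1;
            memcpy(nullterm, buf, len);
            nullterm[len] = '\0';

            errno = 0;
            *val_ret = strtoull(nullterm, &end, 0);
            if (errno || end == nullterm)
                    return -1;
            return 0;
    }

    int main(void)
    {
            unsigned long long val;

            if (parse_u64_bounded("4096\n", 5, &val) == 0)
                    printf("parsed %llu\n", val);
            return 0;
    }
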
|
||||
|
||||
static ssize_t data_prealloc_contig_only_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.data_prealloc_contig_only);
|
||||
}
|
||||
static ssize_t data_prealloc_contig_only_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[20]; /* more than enough for octal -U32_MAX */
|
||||
long val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtol(nullterm, 0, &val);
|
||||
if (ret < 0 || val < 0 || val > 1) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must be 0 or 1");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.data_prealloc_contig_only = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(data_prealloc_contig_only);
|
||||
|
||||
static ssize_t log_merge_wait_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.log_merge_wait_timeout_ms);
|
||||
}
|
||||
static ssize_t log_merge_wait_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
int val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoint(nullterm, 0, &val);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, val);
|
||||
if (ret == 0) {
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.log_merge_wait_timeout_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
ret = count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(log_merge_wait_timeout_ms);
|
||||
|
||||
static ssize_t metadev_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", opts.metadev_path);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(metadev_path);
|
||||
|
||||
static ssize_t orphan_scan_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.orphan_scan_delay_ms);
|
||||
}
|
||||
static ssize_t orphan_scan_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[20]; /* more than enough for octal -U32_MAX */
|
||||
long val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtol(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_ORPHAN_SCAN_DELAY_MS || val > MAX_ORPHAN_SCAN_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid orphan_scan_delay_ms value written to options sysfs file, must be between %lu and %lu",
|
||||
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.orphan_scan_delay_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
scoutfs_inode_schedule_orphan_dwork(sb);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(orphan_scan_delay_ms);
|
||||
|
||||
static ssize_t quorum_heartbeat_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", opts.quorum_heartbeat_timeout_ms);
|
||||
}
|
||||
static ssize_t quorum_heartbeat_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
ret = verify_quorum_heartbeat_timeout_ms(sb, ret, val);
|
||||
if (ret == 0) {
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.quorum_heartbeat_timeout_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
ret = count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(quorum_heartbeat_timeout_ms);
|
||||
|
||||
static ssize_t quorum_slot_nr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", opts.quorum_slot_nr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(quorum_slot_nr);
|
||||
|
||||
static struct attribute *options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_blocks),
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_contig_only),
|
||||
SCOUTFS_ATTR_PTR(log_merge_wait_timeout_ms),
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),
|
||||
SCOUTFS_ATTR_PTR(quorum_heartbeat_timeout_ms),
|
||||
SCOUTFS_ATTR_PTR(quorum_slot_nr),
|
||||
NULL,
|
||||
};
|
||||
|
||||
int scoutfs_options_setup(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs(sb, &optinf->sysfs_attrs, options_attrs, "mount_options");
|
||||
if (ret < 0)
|
||||
scoutfs_options_destroy(sb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* We remove the sysfs files early in unmount so that they can't try to call other subsystems
|
||||
* as they're being destroyed.
|
||||
*/
|
||||
void scoutfs_options_stop(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
|
||||
if (optinf)
|
||||
scoutfs_sysfs_destroy_attrs(sb, &optinf->sysfs_attrs);
|
||||
}
|
||||
|
||||
void scoutfs_options_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct options_sb_info *osi = sbi->options;
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
|
||||
if (osi) {
|
||||
if (osi->debugfs_dir)
|
||||
debugfs_remove_recursive(osi->debugfs_dir);
|
||||
kfree(osi);
|
||||
sbi->options = NULL;
|
||||
scoutfs_options_stop(sb);
|
||||
|
||||
if (optinf) {
|
||||
free_options(&optinf->opts);
|
||||
kfree(optinf);
|
||||
sbi->options_info = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,23 +5,25 @@
|
||||
#include <linux/in.h>
|
||||
#include "format.h"
|
||||
|
||||
enum scoutfs_mount_options {
|
||||
Opt_quorum_slot_nr,
|
||||
Opt_metadev_path,
|
||||
Opt_err,
|
||||
};
|
||||
|
||||
struct mount_options {
|
||||
int quorum_slot_nr;
|
||||
struct scoutfs_mount_options {
|
||||
u64 data_prealloc_blocks;
|
||||
bool data_prealloc_contig_only;
|
||||
unsigned int log_merge_wait_timeout_ms;
|
||||
char *metadev_path;
|
||||
unsigned int orphan_scan_delay_ms;
|
||||
int quorum_slot_nr;
|
||||
u64 quorum_heartbeat_timeout_ms;
|
||||
int tcp_keepalive_timeout_ms;
|
||||
};
|
||||
|
||||
int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
struct mount_options *parsed);
|
||||
#define UNRESPONSIVE_PROBES 3
|
||||
|
||||
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts);
|
||||
int scoutfs_options_show(struct seq_file *seq, struct dentry *root);
|
||||
|
||||
int scoutfs_options_early_setup(struct super_block *sb, char *options);
|
||||
int scoutfs_options_setup(struct super_block *sb);
|
||||
void scoutfs_options_stop(struct super_block *sb);
|
||||
void scoutfs_options_destroy(struct super_block *sb);
|
||||
|
||||
u32 scoutfs_option_u32(struct super_block *sb, int token);
|
||||
#define scoutfs_option_bool scoutfs_option_u32
|
||||
|
||||
#endif /* _SCOUTFS_OPTIONS_H_ */
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,14 +2,13 @@
|
||||
#define _SCOUTFS_QUORUM_H_
|
||||
|
||||
int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
|
||||
void scoutfs_quorum_server_shutdown(struct super_block *sb, u64 term);
|
||||
|
||||
u8 scoutfs_quorum_votes_needed(struct super_block *sb);
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
|
||||
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i,
|
||||
struct sockaddr_in *sin);
|
||||
|
||||
int scoutfs_quorum_fence_leaders(struct super_block *sb, u64 term);
|
||||
int scoutfs_quorum_fence_complete(struct super_block *sb, u64 term);
|
||||
int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_config *qconf,
|
||||
u64 term);
|
||||
|
||||
int scoutfs_quorum_setup(struct super_block *sb);
|
||||
void scoutfs_quorum_shutdown(struct super_block *sb);
|
||||
|
||||
kmod/src/quota.c (new file, 1266 lines; diff suppressed because it is too large)
kmod/src/quota.h (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
#ifndef _SCOUTFS_QUOTA_H_
|
||||
#define _SCOUTFS_QUOTA_H_
|
||||
|
||||
#include "ioctl.h"
|
||||
|
||||
/*
|
||||
* Each rule's name can be in the ruleset's rbtree associated with the
|
||||
* source attr that it selects. This lets checks only test rules that
|
||||
* the inputs could match. The 'i' field indicates which name is in the
|
||||
* tree so we can find the containing rule.
|
||||
*
|
||||
* This is mostly private to quota.c but we expose it for tracing.
|
||||
*/
|
||||
struct squota_rule {
|
||||
u64 limit;
|
||||
u8 prio;
|
||||
u8 op;
|
||||
u8 rule_flags;
|
||||
struct squota_rule_name {
|
||||
struct rb_node node;
|
||||
u64 val;
|
||||
u8 source;
|
||||
u8 flags;
|
||||
u8 i;
|
||||
} names[3];
|
||||
};
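
The 'i' field in struct squota_rule above records which of the three embedded names was found in the rbtree, so the containing rule can be recovered from a pointer to any one name. A hedged userspace sketch of that recovery follows, using offsetof arithmetic in place of the kernel's container_of(); the structure layout here is simplified and hypothetical.

    /* Sketch: recovering the containing rule from one of its embedded names. */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ex_rule_name {
            uint64_t val;
            uint8_t i;              /* index of this name within the rule */
    };

    struct ex_rule {
            uint64_t limit;
            struct ex_rule_name names[3];
    };

    static struct ex_rule *rule_from_name(struct ex_rule_name *name)
    {
            /* step back over the earlier array slots, then over the fields before names[] */
            return (struct ex_rule *)((char *)(name - name->i) - offsetof(struct ex_rule, names));
    }

    int main(void)
    {
            struct ex_rule rule = { .limit = 100 };
            int i;

            for (i = 0; i < 3; i++)
                    rule.names[i].i = i;

            printf("limit %llu\n", (unsigned long long)rule_from_name(&rule.names[2])->limit);
            return 0;
    }
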
|
||||
|
||||
/* private to quota.c, only here for tracing */
|
||||
struct squota_input {
|
||||
u64 attrs[SQ_NS__NR_SELECT];
|
||||
u8 op;
|
||||
};
|
||||
|
||||
int scoutfs_quota_check_inode(struct super_block *sb, struct inode *dir);
|
||||
int scoutfs_quota_check_data(struct super_block *sb, struct inode *inode);
|
||||
|
||||
int scoutfs_quota_get_rules(struct super_block *sb, u64 *iterator,
|
||||
struct scoutfs_ioctl_quota_rule *irules, int nr);
|
||||
int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
|
||||
struct scoutfs_ioctl_quota_rule *irule);
|
||||
|
||||
void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key *end);
|
||||
void scoutfs_quota_invalidate(struct super_block *sb);
|
||||
|
||||
int scoutfs_quota_setup(struct super_block *sb);
|
||||
void scoutfs_quota_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
@@ -76,10 +76,10 @@ static struct recov_pending *lookup_pending(struct recov_info *recinf, u64 rid,
|
||||
* We keep the pending list sorted by rid so that we can iterate over
|
||||
* them. The list should be small and shouldn't be used often.
|
||||
*/
|
||||
static int cmp_pending_rid(void *priv, struct list_head *A, struct list_head *B)
|
||||
static int cmp_pending_rid(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
|
||||
{
|
||||
struct recov_pending *a = list_entry(A, struct recov_pending, head);
|
||||
struct recov_pending *b = list_entry(B, struct recov_pending, head);
|
||||
KC_LIST_CMP_CONST struct recov_pending *a = list_entry(A, KC_LIST_CMP_CONST struct recov_pending, head);
|
||||
KC_LIST_CMP_CONST struct recov_pending *b = list_entry(B, KC_LIST_CMP_CONST struct recov_pending, head);
|
||||
|
||||
return scoutfs_cmp_u64s(a->rid, b->rid);
|
||||
}
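
cmp_pending_rid() above returns scoutfs_cmp_u64s() of the two rids rather than subtracting them, since a u64 difference truncated to int can report the wrong sign. A small userspace comparator in the same three-way style, usable with qsort(), is sketched below.

    /* Safe u64 comparison for sorting; plain subtraction could truncate and flip the sign. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    static int cmp_u64s(uint64_t a, uint64_t b)
    {
            return a < b ? -1 : a > b ? 1 : 0;
    }

    static int cmp_u64_ptrs(const void *A, const void *B)
    {
            const uint64_t *a = A;
            const uint64_t *b = B;

            return cmp_u64s(*a, *b);
    }

    int main(void)
    {
            uint64_t rids[] = { 3, UINT64_MAX, 0, 42 };
            size_t i;

            qsort(rids, 4, sizeof(rids[0]), cmp_u64_ptrs);
            for (i = 0; i < 4; i++)
                    printf("%llu\n", (unsigned long long)rids[i]);
            return 0;
    }
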
|
||||
@@ -262,7 +262,7 @@ void scoutfs_recov_shutdown(struct super_block *sb)
|
||||
recinf->timeout_fn = NULL;
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
list_for_each_entry_safe(pend, tmp, &recinf->pending, head) {
|
||||
list_for_each_entry_safe(pend, tmp, &list, head) {
|
||||
list_del(&pend->head);
|
||||
kfree(pend);
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/unaligned/access_ok.h>
|
||||
|
||||
#include "key.h"
|
||||
#include "format.h"
|
||||
@@ -37,6 +36,10 @@
|
||||
#include "net.h"
|
||||
#include "data.h"
|
||||
#include "ext.h"
|
||||
#include "quota.h"
|
||||
|
||||
#include "trace/quota.h"
|
||||
#include "trace/wkic.h"
|
||||
|
||||
struct lock_info;
|
||||
|
||||
@@ -283,6 +286,52 @@ TRACE_EVENT(scoutfs_data_alloc_block_enter,
|
||||
STE_ENTRY_ARGS(ext))
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_data_page_mkwrite,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, __u64 pos, __u32 ret),
|
||||
|
||||
TP_ARGS(sb, ino, pos, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(__u64, pos)
|
||||
__field(__u32, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->pos = pos;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu pos %llu ret %u ",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->pos, __entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_data_filemap_fault,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, __u64 pos, __u32 ret),
|
||||
|
||||
TP_ARGS(sb, ino, pos, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(__u64, pos)
|
||||
__field(__u32, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->pos = pos;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu pos %llu ret %u ",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->pos, __entry->ret)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_data_file_extent_class,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, struct scoutfs_extent *ext),
|
||||
|
||||
@@ -439,6 +488,7 @@ DECLARE_EVENT_CLASS(scoutfs_trans_hold_release_class,
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->journal_info = (unsigned long)journal_info;
|
||||
__entry->holders = holders;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" journal_info 0x%0lx holders %d ret %d",
|
||||
@@ -691,16 +741,16 @@ TRACE_EVENT(scoutfs_evict_inode,
|
||||
|
||||
TRACE_EVENT(scoutfs_drop_inode,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, unsigned int nlink,
|
||||
unsigned int unhashed, bool drop_invalidated),
|
||||
unsigned int unhashed, bool lock_covered),
|
||||
|
||||
TP_ARGS(sb, ino, nlink, unhashed, drop_invalidated),
|
||||
TP_ARGS(sb, ino, nlink, unhashed, lock_covered),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(unsigned int, nlink)
|
||||
__field(unsigned int, unhashed)
|
||||
__field(unsigned int, drop_invalidated)
|
||||
__field(unsigned int, lock_covered)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
@@ -708,12 +758,12 @@ TRACE_EVENT(scoutfs_drop_inode,
|
||||
__entry->ino = ino;
|
||||
__entry->nlink = nlink;
|
||||
__entry->unhashed = unhashed;
|
||||
__entry->drop_invalidated = !!drop_invalidated;
|
||||
__entry->lock_covered = !!lock_covered;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu nlink %u unhashed %d drop_invalidated %u", SCSB_TRACE_ARGS,
|
||||
TP_printk(SCSBF" ino %llu nlink %u unhashed %d lock_covered %u", SCSB_TRACE_ARGS,
|
||||
__entry->ino, __entry->nlink, __entry->unhashed,
|
||||
__entry->drop_invalidated)
|
||||
__entry->lock_covered)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_inode_walk_writeback,
|
||||
@@ -773,13 +823,14 @@ DEFINE_EVENT(scoutfs_lock_info_class, scoutfs_lock_destroy,
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_xattr_set,
|
||||
TP_PROTO(struct super_block *sb, size_t name_len, const void *value,
|
||||
size_t size, int flags),
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, size_t name_len,
|
||||
const void *value, size_t size, int flags),
|
||||
|
||||
TP_ARGS(sb, name_len, value, size, flags),
|
||||
TP_ARGS(sb, ino, name_len, value, size, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(size_t, name_len)
|
||||
__field(const void *, value)
|
||||
__field(size_t, size)
|
||||
@@ -788,15 +839,16 @@ TRACE_EVENT(scoutfs_xattr_set,
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->name_len = name_len;
|
||||
__entry->value = value;
|
||||
__entry->size = size;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" name_len %zu value %p size %zu flags 0x%x",
|
||||
SCSB_TRACE_ARGS, __entry->name_len, __entry->value,
|
||||
__entry->size, __entry->flags)
|
||||
TP_printk(SCSBF" ino %llu name_len %zu value %p size %zu flags 0x%x",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->name_len,
|
||||
__entry->value, __entry->size, __entry->flags)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_advance_dirty_super,
|
||||
@@ -817,22 +869,17 @@ TRACE_EVENT(scoutfs_advance_dirty_super,
|
||||
TP_printk(SCSBF" super seq now %llu", SCSB_TRACE_ARGS, __entry->seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_dir_add_next_linkref,
|
||||
TRACE_EVENT(scoutfs_dir_add_next_linkref_found,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, __u64 dir_ino,
|
||||
__u64 dir_pos, int ret, __u64 found_dir_ino,
|
||||
__u64 found_dir_pos, unsigned int name_len),
|
||||
__u64 dir_pos, unsigned int name_len),
|
||||
|
||||
TP_ARGS(sb, ino, dir_ino, dir_pos, ret, found_dir_pos, found_dir_ino,
|
||||
name_len),
|
||||
TP_ARGS(sb, ino, dir_ino, dir_pos, name_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(__u64, dir_ino)
|
||||
__field(__u64, dir_pos)
|
||||
__field(int, ret)
|
||||
__field(__u64, found_dir_ino)
|
||||
__field(__u64, found_dir_pos)
|
||||
__field(unsigned int, name_len)
|
||||
),
|
||||
|
||||
@@ -841,16 +888,43 @@ TRACE_EVENT(scoutfs_dir_add_next_linkref,
|
||||
__entry->ino = ino;
|
||||
__entry->dir_ino = dir_ino;
|
||||
__entry->dir_pos = dir_pos;
|
||||
__entry->ret = ret;
|
||||
__entry->found_dir_ino = dir_ino;
|
||||
__entry->found_dir_pos = dir_pos;
|
||||
__entry->name_len = name_len;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu dir_ino %llu dir_pos %llu ret %d found_dir_ino %llu found_dir_pos %llu name_len %u",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->dir_pos,
|
||||
__entry->dir_ino, __entry->ret, __entry->found_dir_pos,
|
||||
__entry->found_dir_ino, __entry->name_len)
|
||||
TP_printk(SCSBF" ino %llu dir_ino %llu dir_pos %llu name_len %u",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->dir_ino,
|
||||
__entry->dir_pos, __entry->name_len)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_dir_add_next_linkrefs,
|
||||
TP_PROTO(struct super_block *sb, __u64 ino, __u64 dir_ino,
|
||||
__u64 dir_pos, int count, int nr, int ret),
|
||||
|
||||
TP_ARGS(sb, ino, dir_ino, dir_pos, count, nr, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ino)
|
||||
__field(__u64, dir_ino)
|
||||
__field(__u64, dir_pos)
|
||||
__field(int, count)
|
||||
__field(int, nr)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->dir_ino = dir_ino;
|
||||
__entry->dir_pos = dir_pos;
|
||||
__entry->count = count;
|
||||
__entry->nr = nr;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu dir_ino %llu dir_pos %llu count %d nr %d ret %d",
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->dir_ino,
|
||||
__entry->dir_pos, __entry->count, __entry->nr, __entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_write_begin,
|
||||
@@ -1020,9 +1094,12 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
__field(u64, refresh_gen)
|
||||
__field(u64, write_seq)
|
||||
__field(u64, dirty_trans_seq)
|
||||
__field(unsigned char, request_pending)
|
||||
__field(unsigned char, invalidate_pending)
|
||||
__field(int, mode)
|
||||
__field(int, invalidating_mode)
|
||||
__field(unsigned int, waiters_cw)
|
||||
__field(unsigned int, waiters_pr)
|
||||
__field(unsigned int, waiters_ex)
|
||||
@@ -1035,9 +1112,12 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
sk_trace_assign(start, &lck->start);
|
||||
sk_trace_assign(end, &lck->end);
|
||||
__entry->refresh_gen = lck->refresh_gen;
|
||||
__entry->write_seq = lck->write_seq;
|
||||
__entry->dirty_trans_seq = lck->dirty_trans_seq;
|
||||
__entry->request_pending = lck->request_pending;
|
||||
__entry->invalidate_pending = lck->invalidate_pending;
|
||||
__entry->mode = lck->mode;
|
||||
__entry->invalidating_mode = lck->invalidating_mode;
|
||||
__entry->waiters_pr = lck->waiters[SCOUTFS_LOCK_READ];
|
||||
__entry->waiters_ex = lck->waiters[SCOUTFS_LOCK_WRITE];
|
||||
__entry->waiters_cw = lck->waiters[SCOUTFS_LOCK_WRITE_ONLY];
|
||||
@@ -1045,10 +1125,11 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class,
|
||||
__entry->users_ex = lck->users[SCOUTFS_LOCK_WRITE];
|
||||
__entry->users_cw = lck->users[SCOUTFS_LOCK_WRITE_ONLY];
|
||||
),
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u reqpnd %u invpnd %u rfrgen %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" mode %u invmd %u reqp %u invp %u refg %llu wris %llu dts %llu waiters: pr %u ex %u cw %u users: pr %u ex %u cw %u",
|
||||
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end),
|
||||
__entry->mode, __entry->request_pending,
|
||||
__entry->invalidate_pending, __entry->refresh_gen,
|
||||
__entry->mode, __entry->invalidating_mode, __entry->request_pending,
|
||||
__entry->invalidate_pending, __entry->refresh_gen, __entry->write_seq,
|
||||
__entry->dirty_trans_seq,
|
||||
__entry->waiters_pr, __entry->waiters_ex, __entry->waiters_cw,
|
||||
__entry->users_pr, __entry->users_ex, __entry->users_cw)
|
||||
);
|
||||
@@ -1417,42 +1498,71 @@ TRACE_EVENT(scoutfs_rename,
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_d_revalidate,
|
||||
TP_PROTO(struct super_block *sb,
|
||||
struct dentry *dentry, int flags, struct dentry *parent,
|
||||
bool is_covered, int ret),
|
||||
TP_PROTO(struct super_block *sb, struct dentry *dentry, int flags, u64 dir_ino, int ret),
|
||||
|
||||
TP_ARGS(sb, dentry, flags, parent, is_covered, ret),
|
||||
TP_ARGS(sb, dentry, flags, dir_ino, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, dentry)
|
||||
__string(name, dentry->d_name.name)
|
||||
__field(__u64, ino)
|
||||
__field(__u64, parent_ino)
|
||||
__field(__u64, dir_ino)
|
||||
__field(int, flags)
|
||||
__field(int, is_root)
|
||||
__field(int, is_covered)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->dentry = dentry;
|
||||
__assign_str(name, dentry->d_name.name)
|
||||
__entry->ino = dentry->d_inode ?
|
||||
scoutfs_ino(dentry->d_inode) : 0;
|
||||
__entry->parent_ino = parent->d_inode ?
|
||||
scoutfs_ino(parent->d_inode) : 0;
|
||||
__entry->ino = dentry->d_inode ? scoutfs_ino(dentry->d_inode) : 0;
|
||||
__entry->dir_ino = dir_ino;
|
||||
__entry->flags = flags;
|
||||
__entry->is_root = IS_ROOT(dentry);
|
||||
__entry->is_covered = is_covered;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" name %s ino %llu parent_ino %llu flags 0x%x s_root %u is_covered %u ret %d",
|
||||
SCSB_TRACE_ARGS, __get_str(name), __entry->ino,
|
||||
__entry->parent_ino, __entry->flags,
|
||||
__entry->is_root,
|
||||
__entry->is_covered,
|
||||
__entry->ret)
|
||||
TP_printk(SCSBF" dentry %p name %s ino %llu dir_ino %llu flags 0x%x s_root %u ret %d",
|
||||
SCSB_TRACE_ARGS, __entry->dentry, __get_str(name), __entry->ino, __entry->dir_ino,
|
||||
__entry->flags, __entry->is_root, __entry->ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_validate_dentry,
|
||||
TP_PROTO(struct super_block *sb, struct dentry *dentry, u64 dir_ino, u64 dentry_ino,
|
||||
u64 dent_ino, u64 refresh_gen, int ret),
|
||||
|
||||
TP_ARGS(sb, dentry, dir_ino, dentry_ino, dent_ino, refresh_gen, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, dentry)
|
||||
__field(__u64, dir_ino)
|
||||
__string(name, dentry->d_name.name)
|
||||
__field(__u64, dentry_ino)
|
||||
__field(__u64, dent_ino)
|
||||
__field(__u64, fsdata_gen)
|
||||
__field(__u64, refresh_gen)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->dentry = dentry;
|
||||
__entry->dir_ino = dir_ino;
|
||||
__assign_str(name, dentry->d_name.name)
|
||||
__entry->dentry_ino = dentry_ino;
|
||||
__entry->dent_ino = dent_ino;
|
||||
__entry->fsdata_gen = (unsigned long long)dentry->d_fsdata;
|
||||
__entry->refresh_gen = refresh_gen;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" dentry %p dir %llu name %s dentry_ino %llu dent_ino %llu fsdata_gen %llu refresh_gen %llu ret %d",
|
||||
SCSB_TRACE_ARGS, __entry->dentry, __entry->dir_ino, __get_str(name),
|
||||
__entry->dentry_ino, __entry->dent_ino, __entry->fsdata_gen,
|
||||
__entry->refresh_gen, __entry->ret)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_super_lifecycle_class,
|
||||
@@ -1695,21 +1805,41 @@ TRACE_EVENT(scoutfs_btree_merge,
|
||||
sk_trace_args(end))
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_btree_merge_read_range,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *start, struct scoutfs_key *end,
|
||||
int size),
|
||||
|
||||
TP_ARGS(sb, start, end, size),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
__field(int, size)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
__entry->size = size;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" start "SK_FMT" end "SK_FMT" size %d",
|
||||
SCSB_TRACE_ARGS, sk_trace_args(start), sk_trace_args(end), __entry->size)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_btree_merge_items,
|
||||
TP_PROTO(struct super_block *sb,
|
||||
struct scoutfs_btree_root *m_root,
|
||||
struct scoutfs_key *m_key, int m_val_len,
|
||||
struct scoutfs_btree_root *f_root,
|
||||
struct scoutfs_key *f_key, int f_val_len,
|
||||
int is_del),
|
||||
|
||||
TP_ARGS(sb, m_root, m_key, m_val_len, f_root, f_key, f_val_len, is_del),
|
||||
TP_ARGS(sb, m_key, m_val_len, f_root, f_key, f_val_len, is_del),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, m_root_blkno)
|
||||
__field(__u64, m_root_seq)
|
||||
__field(__u8, m_root_height)
|
||||
sk_trace_define(m_key)
|
||||
__field(int, m_val_len)
|
||||
__field(__u64, f_root_blkno)
|
||||
@@ -1722,10 +1852,6 @@ TRACE_EVENT(scoutfs_btree_merge_items,
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->m_root_blkno = m_root ?
|
||||
le64_to_cpu(m_root->ref.blkno) : 0;
|
||||
__entry->m_root_seq = m_root ? le64_to_cpu(m_root->ref.seq) : 0;
|
||||
__entry->m_root_height = m_root ? m_root->height : 0;
|
||||
sk_trace_assign(m_key, m_key);
|
||||
__entry->m_val_len = m_val_len;
|
||||
__entry->f_root_blkno = f_root ?
|
||||
@@ -1737,11 +1863,9 @@ TRACE_EVENT(scoutfs_btree_merge_items,
|
||||
__entry->is_del = !!is_del;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" merge item root blkno %llu seq %llu height %u key "SK_FMT" val_len %d, fs item root blkno %llu seq %llu height %u key "SK_FMT" val_len %d, is_del %d",
|
||||
SCSB_TRACE_ARGS, __entry->m_root_blkno, __entry->m_root_seq,
|
||||
__entry->m_root_height, sk_trace_args(m_key),
|
||||
__entry->m_val_len, __entry->f_root_blkno,
|
||||
__entry->f_root_seq, __entry->f_root_height,
|
||||
TP_printk(SCSBF" merge item key "SK_FMT" val_len %d, fs item root blkno %llu seq %llu height %u key "SK_FMT" val_len %d, is_del %d",
|
||||
SCSB_TRACE_ARGS, sk_trace_args(m_key), __entry->m_val_len,
|
||||
__entry->f_root_blkno, __entry->f_root_seq, __entry->f_root_height,
|
||||
sk_trace_args(f_key), __entry->f_val_len, __entry->is_del)
|
||||
);
|
||||
|
||||
@@ -1843,6 +1967,69 @@ DEFINE_EVENT(scoutfs_server_client_count_class, scoutfs_server_client_down,
|
||||
TP_ARGS(sb, rid, nr_clients)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_server_commit_users_class,
|
||||
TP_PROTO(struct super_block *sb, int holding, int applying,
|
||||
int nr_holders, u32 budget,
|
||||
u32 avail_before, u32 freed_before,
|
||||
int committing, int exceeded),
|
||||
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(int, holding)
|
||||
__field(int, applying)
|
||||
__field(int, nr_holders)
|
||||
__field(u32, budget)
|
||||
__field(__u32, avail_before)
|
||||
__field(__u32, freed_before)
|
||||
__field(int, committing)
|
||||
__field(int, exceeded)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->holding = !!holding;
|
||||
__entry->applying = !!applying;
|
||||
__entry->nr_holders = nr_holders;
|
||||
__entry->budget = budget;
|
||||
__entry->avail_before = avail_before;
|
||||
__entry->freed_before = freed_before;
|
||||
__entry->committing = !!committing;
|
||||
__entry->exceeded = !!exceeded;
|
||||
),
|
||||
TP_printk(SCSBF" holding %u applying %u nr %u budget %u avail_before %u freed_before %u committing %u exceeded %u",
|
||||
SCSB_TRACE_ARGS, __entry->holding, __entry->applying,
|
||||
__entry->nr_holders, __entry->budget,
|
||||
__entry->avail_before, __entry->freed_before,
|
||||
__entry->committing, __entry->exceeded)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_hold,
|
||||
TP_PROTO(struct super_block *sb, int holding, int applying,
|
||||
int nr_holders, u32 budget,
|
||||
u32 avail_before, u32 freed_before,
|
||||
int committing, int exceeded),
|
||||
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_apply,
|
||||
TP_PROTO(struct super_block *sb, int holding, int applying,
|
||||
int nr_holders, u32 budget,
|
||||
u32 avail_before, u32 freed_before,
|
||||
int committing, int exceeded),
|
||||
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_start,
|
||||
TP_PROTO(struct super_block *sb, int holding, int applying,
|
||||
int nr_holders, u32 budget,
|
||||
u32 avail_before, u32 freed_before,
|
||||
int committing, int exceeded),
|
||||
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_server_commit_users_class, scoutfs_server_commit_end,
|
||||
TP_PROTO(struct super_block *sb, int holding, int applying,
|
||||
int nr_holders, u32 budget,
|
||||
u32 avail_before, u32 freed_before,
|
||||
int committing, int exceeded),
|
||||
TP_ARGS(sb, holding, applying, nr_holders, budget, avail_before, freed_before, committing, exceeded)
|
||||
);
|
||||
|
||||
#define slt_symbolic(mode) \
|
||||
__print_symbolic(mode, \
|
||||
{ SLT_CLIENT, "client" }, \
|
||||
@@ -1922,9 +2109,9 @@ DEFINE_EVENT(scoutfs_quorum_message_class, scoutfs_quorum_recv_message,
|
||||
|
||||
TRACE_EVENT(scoutfs_quorum_loop,
|
||||
TP_PROTO(struct super_block *sb, int role, u64 term, int vote_for,
|
||||
unsigned long vote_bits, struct timespec64 timeout),
|
||||
unsigned long vote_bits, unsigned long long nsecs),
|
||||
|
||||
TP_ARGS(sb, role, term, vote_for, vote_bits, timeout),
|
||||
TP_ARGS(sb, role, term, vote_for, vote_bits, nsecs),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
@@ -1933,8 +2120,7 @@ TRACE_EVENT(scoutfs_quorum_loop,
|
||||
__field(int, vote_for)
|
||||
__field(unsigned long, vote_bits)
|
||||
__field(unsigned long, vote_count)
|
||||
__field(unsigned long long, timeout_sec)
|
||||
__field(int, timeout_nsec)
|
||||
__field(unsigned long long, nsecs)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
@@ -1944,14 +2130,13 @@ TRACE_EVENT(scoutfs_quorum_loop,
|
||||
__entry->vote_for = vote_for;
|
||||
__entry->vote_bits = vote_bits;
|
||||
__entry->vote_count = hweight_long(vote_bits);
|
||||
__entry->timeout_sec = timeout.tv_sec;
|
||||
__entry->timeout_nsec = timeout.tv_nsec;
|
||||
__entry->nsecs = nsecs;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu.%u",
|
||||
TP_printk(SCSBF" term %llu role %d vote_for %d vote_bits 0x%lx vote_count %lu timeout %llu",
|
||||
SCSB_TRACE_ARGS, __entry->term, __entry->role,
|
||||
__entry->vote_for, __entry->vote_bits, __entry->vote_count,
|
||||
__entry->timeout_sec, __entry->timeout_nsec)
|
||||
__entry->nsecs)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_trans_seq_last,
|
||||
@@ -1975,6 +2160,71 @@ TRACE_EVENT(scoutfs_trans_seq_last,
|
||||
SCSB_TRACE_ARGS, __entry->s_rid, __entry->trans_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_server_finalize_items,
|
||||
TP_PROTO(struct super_block *sb, u64 rid, u64 item_rid, u64 item_nr, u64 item_flags,
|
||||
u64 item_get_trans_seq),
|
||||
|
||||
TP_ARGS(sb, rid, item_rid, item_nr, item_flags, item_get_trans_seq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, c_rid)
|
||||
__field(__u64, item_rid)
|
||||
__field(__u64, item_nr)
|
||||
__field(__u64, item_flags)
|
||||
__field(__u64, item_get_trans_seq)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->c_rid = rid;
|
||||
__entry->item_rid = item_rid;
|
||||
__entry->item_nr = item_nr;
|
||||
__entry->item_flags = item_flags;
|
||||
__entry->item_get_trans_seq = item_get_trans_seq;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rid %016llx item_rid %016llx item_nr %llu item_flags 0x%llx item_get_trans_seq %llu",
|
||||
SCSB_TRACE_ARGS, __entry->c_rid, __entry->item_rid, __entry->item_nr,
|
||||
__entry->item_flags, __entry->item_get_trans_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_server_finalize_decision,
|
||||
TP_PROTO(struct super_block *sb, u64 rid, bool saw_finalized, bool others_active,
|
||||
bool ours_visible, bool finalize_ours, unsigned int delay_ms,
|
||||
u64 finalize_sent_seq),
|
||||
|
||||
TP_ARGS(sb, rid, saw_finalized, others_active, ours_visible, finalize_ours, delay_ms,
|
||||
finalize_sent_seq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, c_rid)
|
||||
__field(bool, saw_finalized)
|
||||
__field(bool, others_active)
|
||||
__field(bool, ours_visible)
|
||||
__field(bool, finalize_ours)
|
||||
__field(unsigned int, delay_ms)
|
||||
__field(__u64, finalize_sent_seq)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->c_rid = rid;
|
||||
__entry->saw_finalized = saw_finalized;
|
||||
__entry->others_active = others_active;
|
||||
__entry->ours_visible = ours_visible;
|
||||
__entry->finalize_ours = finalize_ours;
|
||||
__entry->delay_ms = delay_ms;
|
||||
__entry->finalize_sent_seq = finalize_sent_seq;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rid %016llx saw_finalized %u others_active %u ours_visible %u finalize_ours %u delay_ms %u finalize_sent_seq %llu",
|
||||
SCSB_TRACE_ARGS, __entry->c_rid, __entry->saw_finalized, __entry->others_active,
|
||||
__entry->ours_visible, __entry->finalize_ours, __entry->delay_ms,
|
||||
__entry->finalize_sent_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_get_log_merge_status,
|
||||
TP_PROTO(struct super_block *sb, u64 rid, struct scoutfs_key *next_range_key,
|
||||
u64 nr_requests, u64 nr_complete, u64 seq),
|
||||
@@ -2215,10 +2465,69 @@ TRACE_EVENT(scoutfs_block_dirty_ref,
|
||||
__entry->block_blkno, __entry->block_seq)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_get_file_block,
|
||||
TP_PROTO(struct super_block *sb, u64 blkno, int flags),
|
||||
|
||||
TP_ARGS(sb, blkno, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, blkno)
|
||||
__field(int, flags)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->blkno = blkno;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" blkno %llu flags 0x%x",
|
||||
SCSB_TRACE_ARGS, __entry->blkno, __entry->flags)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_block_stale,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_block_ref *ref,
|
||||
struct scoutfs_block_header *hdr, u32 magic, u32 crc),
|
||||
|
||||
TP_ARGS(sb, ref, hdr, magic, crc),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, ref_blkno)
|
||||
__field(__u64, ref_seq)
|
||||
__field(__u32, hdr_crc)
|
||||
__field(__u32, hdr_magic)
|
||||
__field(__u64, hdr_fsid)
|
||||
__field(__u64, hdr_seq)
|
||||
__field(__u64, hdr_blkno)
|
||||
__field(__u32, magic)
|
||||
__field(__u32, crc)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ref_blkno = le64_to_cpu(ref->blkno);
|
||||
__entry->ref_seq = le64_to_cpu(ref->seq);
|
||||
__entry->hdr_crc = le32_to_cpu(hdr->crc);
|
||||
__entry->hdr_magic = le32_to_cpu(hdr->magic);
|
||||
__entry->hdr_fsid = le64_to_cpu(hdr->fsid);
|
||||
__entry->hdr_seq = le64_to_cpu(hdr->seq);
|
||||
__entry->hdr_blkno = le64_to_cpu(hdr->blkno);
|
||||
__entry->magic = magic;
|
||||
__entry->crc = crc;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ref_blkno %llu ref_seq %016llx hdr_crc %08x hdr_magic %08x hdr_fsid %016llx hdr_seq %016llx hdr_blkno %llu magic %08x crc %08x",
|
||||
SCSB_TRACE_ARGS, __entry->ref_blkno, __entry->ref_seq, __entry->hdr_crc,
|
||||
__entry->hdr_magic, __entry->hdr_fsid, __entry->hdr_seq, __entry->hdr_blkno,
|
||||
__entry->magic, __entry->crc)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_block_class,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno, int refcount, int io_count,
|
||||
unsigned long bits, __u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed),
|
||||
unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits),
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, bp)
|
||||
@@ -2226,7 +2535,6 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
|
||||
__field(int, refcount)
|
||||
__field(int, io_count)
|
||||
__field(long, bits)
|
||||
__field(__u64, accessed)
|
||||
),
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
@@ -2235,71 +2543,65 @@ DECLARE_EVENT_CLASS(scoutfs_block_class,
|
||||
__entry->refcount = refcount;
|
||||
__entry->io_count = io_count;
|
||||
__entry->bits = bits;
|
||||
__entry->accessed = accessed;
|
||||
),
|
||||
TP_printk(SCSBF" bp %p blkno %llu refcount %d io_count %d bits 0x%lx accessed %llu",
|
||||
TP_printk(SCSBF" bp %p blkno %llu refcount %x io_count %d bits 0x%lx",
|
||||
SCSB_TRACE_ARGS, __entry->bp, __entry->blkno, __entry->refcount,
|
||||
__entry->io_count, __entry->bits, __entry->accessed)
|
||||
__entry->io_count, __entry->bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_allocate,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_free,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_insert,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_remove,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_end_io,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_submit,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_invalidate,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_mark_dirty,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_forget,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_shrink,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits,
|
||||
__u64 accessed),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits, accessed)
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_block_class, scoutfs_block_isolate,
|
||||
TP_PROTO(struct super_block *sb, void *bp, u64 blkno,
|
||||
int refcount, int io_count, unsigned long bits),
|
||||
TP_ARGS(sb, bp, blkno, refcount, io_count, bits)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_ext_next_class,
|
||||
@@ -2620,9 +2922,9 @@ TRACE_EVENT(scoutfs_item_invalidate_page,
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
int bit_nr),
|
||||
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
@@ -2630,7 +2932,6 @@ DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
|
||||
__field(__u64, group_nr)
|
||||
__field(unsigned int, group_total)
|
||||
__field(int, bit_nr)
|
||||
__field(int, bit_count)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
@@ -2639,43 +2940,42 @@ DECLARE_EVENT_CLASS(scoutfs_omap_group_class,
|
||||
__entry->group_nr = group_nr;
|
||||
__entry->group_total = group_total;
|
||||
__entry->bit_nr = bit_nr;
|
||||
__entry->bit_count = bit_count;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d bit_count %d",
|
||||
TP_printk(SCSBF" grp %p group_nr %llu group_total %u bit_nr %d",
|
||||
SCSB_TRACE_ARGS, __entry->grp, __entry->group_nr, __entry->group_total,
|
||||
__entry->bit_nr, __entry->bit_count)
|
||||
__entry->bit_nr)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_alloc,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_free,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_inc,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_dec,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_request,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_omap_group_class, scoutfs_omap_group_destroy,
|
||||
TP_PROTO(struct super_block *sb, void *grp, u64 group_nr, unsigned int group_total,
|
||||
int bit_nr, int bit_count),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr, bit_count)
|
||||
int bit_nr),
|
||||
TP_ARGS(sb, grp, group_nr, group_total, bit_nr)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_omap_should_delete,
|
||||
@@ -2701,6 +3001,102 @@ TRACE_EVENT(scoutfs_omap_should_delete,
|
||||
SCSB_TRACE_ARGS, __entry->ino, __entry->nlink, __entry->ret)
|
||||
);
|
||||
|
||||
#define SSCF_FMT "[bo %llu bs %llu es %llu]"
|
||||
#define SSCF_FIELDS(pref) \
|
||||
__field(__u64, pref##_blkno) \
|
||||
__field(__u64, pref##_blocks) \
|
||||
__field(__u64, pref##_entries)
|
||||
#define SSCF_ASSIGN(pref, sfl) \
|
||||
__entry->pref##_blkno = le64_to_cpu((sfl)->ref.blkno); \
|
||||
__entry->pref##_blocks = le64_to_cpu((sfl)->blocks); \
|
||||
__entry->pref##_entries = le64_to_cpu((sfl)->entries);
|
||||
#define SSCF_ENTRY_ARGS(pref) \
|
||||
__entry->pref##_blkno, \
|
||||
__entry->pref##_blocks, \
|
||||
__entry->pref##_entries
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_srch_compact_class,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_srch_compact *sc),
|
||||
|
||||
TP_ARGS(sb, sc),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(__u64, id)
|
||||
__field(__u8, nr)
|
||||
__field(__u8, flags)
|
||||
SSCF_FIELDS(out)
|
||||
__field(__u64, in0_blk)
|
||||
__field(__u64, in0_pos)
|
||||
SSCF_FIELDS(in0)
|
||||
__field(__u64, in1_blk)
|
||||
__field(__u64, in1_pos)
|
||||
SSCF_FIELDS(in1)
|
||||
__field(__u64, in2_blk)
|
||||
__field(__u64, in2_pos)
|
||||
SSCF_FIELDS(in2)
|
||||
__field(__u64, in3_blk)
|
||||
__field(__u64, in3_pos)
|
||||
SSCF_FIELDS(in3)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->id = le64_to_cpu(sc->id);
|
||||
__entry->nr = sc->nr;
|
||||
__entry->flags = sc->flags;
|
||||
SSCF_ASSIGN(out, &sc->out)
|
||||
__entry->in0_blk = le64_to_cpu(sc->in[0].blk);
|
||||
__entry->in0_pos = le64_to_cpu(sc->in[0].pos);
|
||||
SSCF_ASSIGN(in0, &sc->in[0].sfl)
|
||||
		__entry->in1_blk = le64_to_cpu(sc->in[1].blk);
		__entry->in1_pos = le64_to_cpu(sc->in[1].pos);
		SSCF_ASSIGN(in1, &sc->in[1].sfl)
		__entry->in2_blk = le64_to_cpu(sc->in[2].blk);
		__entry->in2_pos = le64_to_cpu(sc->in[2].pos);
		SSCF_ASSIGN(in2, &sc->in[2].sfl)
		__entry->in3_blk = le64_to_cpu(sc->in[3].blk);
		__entry->in3_pos = le64_to_cpu(sc->in[3].pos);
		SSCF_ASSIGN(in3, &sc->in[3].sfl)
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" id %llu nr %u flags 0x%x out "SSCF_FMT" in0 b %llu p %llu "SSCF_FMT" in1 b %llu p %llu "SSCF_FMT" in2 b %llu p %llu "SSCF_FMT" in3 b %llu p %llu "SSCF_FMT,
|
||||
SCSB_TRACE_ARGS, __entry->id, __entry->nr, __entry->flags, SSCF_ENTRY_ARGS(out),
|
||||
__entry->in0_blk, __entry->in0_pos, SSCF_ENTRY_ARGS(in0),
|
||||
__entry->in1_blk, __entry->in1_pos, SSCF_ENTRY_ARGS(in1),
|
||||
__entry->in2_blk, __entry->in2_pos, SSCF_ENTRY_ARGS(in2),
|
||||
__entry->in3_blk, __entry->in3_pos, SSCF_ENTRY_ARGS(in3))
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_srch_compact_class, scoutfs_srch_compact_client_send,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_srch_compact *sc),
|
||||
TP_ARGS(sb, sc)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_srch_compact_class, scoutfs_srch_compact_client_recv,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_srch_compact *sc),
|
||||
TP_ARGS(sb, sc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_ioc_search_xattrs,
|
||||
TP_PROTO(struct super_block *sb, u64 ino, u64 last_ino),
|
||||
|
||||
TP_ARGS(sb, ino, last_ino),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(u64, ino)
|
||||
__field(u64, last_ino)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ino = ino;
|
||||
__entry->last_ino = last_ino;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ino %llu last_ino %llu", SCSB_TRACE_ARGS,
|
||||
__entry->ino, __entry->last_ino)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_SCOUTFS_H */
|
||||
|
||||
/* This part must be outside protection */
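The churn in the DEFINE_EVENT() stanzas above follows from how the ftrace event class macros work: every event defined from a shared class repeats the class's TP_PROTO/TP_ARGS, so adding or dropping a class argument (accessed, bit_count) has to be echoed in every definition. A minimal sketch of the pattern, with hypothetical event names rather than anything taken from scoutfs:

DECLARE_EVENT_CLASS(example_block_class,
	TP_PROTO(struct super_block *sb, u64 blkno, int refcount),
	TP_ARGS(sb, blkno, refcount),
	TP_STRUCT__entry(
		__field(u64, blkno)
		__field(int, refcount)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->refcount = refcount;
	),
	TP_printk("blkno %llu refcount %d",
		  (unsigned long long)__entry->blkno, __entry->refcount)
);

/* each event repeats the prototype, so a class change fans out to every DEFINE_EVENT */
DEFINE_EVENT(example_block_class, example_block_get,
	TP_PROTO(struct super_block *sb, u64 blkno, int refcount),
	TP_ARGS(sb, blkno, refcount)
);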
kmod/src/server.c: 1407 changed lines (diff suppressed because it is too large)
@@ -64,8 +64,6 @@ int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_key *key);
|
||||
void scoutfs_server_hold_commit(struct super_block *sb);
|
||||
int scoutfs_server_apply_commit(struct super_block *sb, int err);
|
||||
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
|
||||
|
||||
int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
|
||||
@@ -77,9 +75,12 @@ u64 scoutfs_server_seq(struct super_block *sb);
|
||||
u64 scoutfs_server_next_seq(struct super_block *sb);
|
||||
void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);
|
||||
|
||||
int scoutfs_server_start(struct super_block *sb, u64 term);
|
||||
void scoutfs_server_abort(struct super_block *sb);
|
||||
void scoutfs_server_start(struct super_block *sb, struct scoutfs_quorum_config *qconf, u64 term);
|
||||
void scoutfs_server_stop(struct super_block *sb);
|
||||
void scoutfs_server_stop_wait(struct super_block *sb);
|
||||
bool scoutfs_server_is_running(struct super_block *sb);
|
||||
bool scoutfs_server_is_up(struct super_block *sb);
|
||||
bool scoutfs_server_is_down(struct super_block *sb);
|
||||
|
||||
int scoutfs_server_setup(struct super_block *sb);
|
||||
void scoutfs_server_destroy(struct super_block *sb);
|
||||
|
||||
kmod/src/sparse-filtered.sh: new executable file, 45 lines
@@ -0,0 +1,45 @@
#!/bin/bash

#
# Unfortunately, kernels can ship which contain sparse errors that are
# unrelated to us.
#
# The exit status of this filtering wrapper will indicate an error if
# sparse wasn't found or if there were any unfiltered output lines. It
# can hide error exit status from sparse or grep if they don't produce
# output that makes it past the filters.
#

# must have sparse. Fail with error message, mask success path.
which sparse > /dev/null || exit 1

# initial unmatchable, additional added as RE+="|..."
RE="$^"

#
# Darn. sparse has multi-line error messages, and I'd rather not bother
# with multi-line filters. So we'll just drop this context.
#
# command-line: note: in included file (through include/linux/netlink.h, include/linux/ethtool.h, include/linux/netdevice.h, include/net/sock.h, /root/scoutfs/kmod/src/kernelcompat.h, builtin):
# fprintf(stderr, "%s: note: in included file%s:\n",
#
RE+="|: note: in included file"

# 3.10.0-1160.119.1.el7.x86_64.debug
# include/linux/posix_acl.h:138:9: warning: incorrect type in assignment (different address spaces)
# include/linux/posix_acl.h:138:9: expected struct posix_acl *<noident>
# include/linux/posix_acl.h:138:9: got struct posix_acl [noderef] <asn:4>*<noident>
RE+="|include/linux/posix_acl.h:"

# 3.10.0-1160.119.1.el7.x86_64.debug
#include/uapi/linux/perf_event.h:146:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
RE+="|include/uapi/linux/perf_event.h:"

# 4.18.0-513.24.1.el8_9.x86_64+debug
#./include/linux/skbuff.h:824:1: warning: directive in macro's argument list
RE+="|include/linux/skbuff.h:"

sparse "$@" |& \
	grep -E -v "($RE)" |& \
	awk '{ print $0 } END { exit NR > 0 }'
exit $?
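As an aside on how a wrapper like this is normally wired in: kbuild's static analysis hook is the CHECK variable together with C=1 or C=2, so an invocation along the lines of "make C=2 CHECK=kmod/src/sparse-filtered.sh" would route every compiled file through the filter. The repo's actual Makefile integration isn't shown in this diff, so treat the exact command as an assumption.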
kmod/src/srch.c: 263 changed lines
@@ -18,6 +18,7 @@
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sort.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "format.h"
|
||||
@@ -30,6 +31,9 @@
|
||||
#include "client.h"
|
||||
#include "counters.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "triggers.h"
|
||||
#include "sysfs.h"
|
||||
#include "msg.h"
|
||||
|
||||
/*
 * This srch subsystem gives us a way to find inodes that have a given
@@ -58,7 +62,7 @@
 * re-allocated and re-written. Search can restart by checking the
 * btree for the current set of files. Compaction reads log files which
 * are protected from other compactions by the persistent busy items
 * created by the server. Compaction won't see it's blocks reused out
 * created by the server. Compaction won't see its blocks reused out
 * from under it, but it can encounter stale cached blocks that need to
 * be invalidated.
 */
@@ -68,10 +72,14 @@ struct srch_info {
|
||||
atomic_t shutdown;
|
||||
struct workqueue_struct *workq;
|
||||
struct delayed_work compact_dwork;
|
||||
struct scoutfs_sysfs_attrs ssa;
|
||||
atomic_t compact_delay_ms;
|
||||
};
|
||||
|
||||
#define DECLARE_SRCH_INFO(sb, name) \
|
||||
struct srch_info *name = SCOUTFS_SB(sb)->srch_info
|
||||
#define DECLARE_SRCH_INFO_KOBJ(kobj, name) \
|
||||
DECLARE_SRCH_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
|
||||
|
||||
#define SRE_FMT "%016llx.%llu.%llu"
|
||||
#define SRE_ARG(sre) \
|
||||
@@ -434,6 +442,10 @@ out:
|
||||
if (ret == 0 && (flags & GFB_INSERT) && blk >= le64_to_cpu(sfl->blocks))
|
||||
sfl->blocks = cpu_to_le64(blk + 1);
|
||||
|
||||
if (bl) {
|
||||
trace_scoutfs_get_file_block(sb, bl->blkno, flags);
|
||||
}
|
||||
|
||||
*bl_ret = bl;
|
||||
return ret;
|
||||
}
|
||||
@@ -520,6 +532,95 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Padded entries are encoded in pairs after an existing entry. All of
 * the pairs cancel each other out for all readers (the second encoding
 * looks like deletion) so they aren't visible to the first/last bounds of
 * the block or file.
 */
static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
|
||||
struct scoutfs_srch_block *srb, struct scoutfs_srch_entry *sre)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
|
||||
sre, &srb->tail);
|
||||
if (ret > 0) {
|
||||
srb->tail = *sre;
|
||||
le32_add_cpu(&srb->entry_nr, 1);
|
||||
le32_add_cpu(&srb->entry_bytes, ret);
|
||||
le64_add_cpu(&sfl->entries, 1);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * This is called by a testing trigger to create a very specific case of
 * encoded entry offsets. We want the last entry in the block to start
 * precisely at the _SAFE_BYTES offset.
 *
 * This is called when there is a single existing entry in the block.
 * We have the entire block to work with. We encode pairs of matching
 * entries. This hides them from readers (both searches and merging) as
 * they're interpreted as creation and deletion and are deleted. We use
 * the existing hash value of the first entry in the block but then set
 * the inode to an impossibly large number so it doesn't interfere with
 * anything.
 *
 * To hit the specific offset we very carefully manage the number of
 * bytes of change between fields in the entry. We know that if we
 * change all the bytes of the ino and id we end up with a 20 byte
 * (2+8+8,2) encoding of the pair of entries. To have the last entry
 * start at the _SAFE_POS offset we know that the final 20 byte pair
 * encoding needs to end at 2 bytes (second entry encoding) after the
 * _SAFE_POS offset.
 *
 * So as we encode pairs we watch the delta of our current offset from
 * that desired final offset of 2 past _SAFE_POS. If we're a multiple
 * of 20 away then we encode the full 20 byte pairs. If we're not, then
 * we drop a byte to encode 19 bytes. That'll slowly change the offset
 * to be a multiple of 20 again while encoding large entries.
 */
static void pad_entries_at_safe(struct scoutfs_srch_file *sfl, u64 blk,
|
||||
struct scoutfs_srch_block *srb)
|
||||
{
|
||||
struct scoutfs_srch_entry sre;
|
||||
u32 target;
|
||||
s32 diff;
|
||||
u64 hash;
|
||||
u64 ino;
|
||||
u64 id;
|
||||
int ret;
|
||||
|
||||
hash = le64_to_cpu(srb->tail.hash);
|
||||
ino = le64_to_cpu(srb->tail.ino) | (1ULL << 62);
|
||||
id = le64_to_cpu(srb->tail.id);
|
||||
|
||||
target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;
|
||||
|
||||
while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
|
||||
ino ^= 1ULL << (7 * 8);
|
||||
if (diff % 20 == 0) {
|
||||
id ^= 1ULL << (7 * 8);
|
||||
} else {
|
||||
id ^= 1ULL << (6 * 8);
|
||||
}
|
||||
|
||||
sre.hash = cpu_to_le64(hash);
|
||||
sre.ino = cpu_to_le64(ino);
|
||||
sre.id = cpu_to_le64(id);
|
||||
|
||||
ret = append_padded_entry(sfl, blk, srb, &sre);
|
||||
if (ret == 0)
|
||||
ret = append_padded_entry(sfl, blk, srb, &sre);
|
||||
BUG_ON(ret != 0);
|
||||
|
||||
diff = target - le32_to_cpu(srb->entry_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller is dropping an ino/id because the tracking rbtree is full.
|
||||
* This loses information so we can't return any entries at or after the
|
||||
@@ -652,14 +753,14 @@ static int search_log_file(struct super_block *sb,
|
||||
for (i = 0; i < le32_to_cpu(srb->entry_nr); i++) {
|
||||
if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = decode_entry(srb->entries + pos, &sre, &prev);
|
||||
if (ret <= 0) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
@@ -762,14 +863,14 @@ static int search_sorted_file(struct super_block *sb,
|
||||
|
||||
if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = decode_entry(srb->entries + pos, &sre, &prev);
|
||||
if (ret <= 0) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
@@ -861,7 +962,6 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
struct scoutfs_srch_rb_root *sroot,
|
||||
u64 hash, u64 ino, u64 last_ino, bool *done)
|
||||
{
|
||||
struct scoutfs_net_roots prev_roots;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_srch_entry start;
|
||||
struct scoutfs_srch_entry end;
|
||||
@@ -869,15 +969,17 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_srch_file sfl;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
DECLARE_SAVED_REFS(saved);
|
||||
struct scoutfs_key key;
|
||||
unsigned long limit = SRCH_LIMIT;
|
||||
int ret;
|
||||
|
||||
scoutfs_inc_counter(sb, srch_search_xattrs);
|
||||
|
||||
trace_scoutfs_ioc_search_xattrs(sb, ino, last_ino);
|
||||
|
||||
*done = false;
|
||||
srch_init_rb_root(sroot);
|
||||
memset(&prev_roots, 0, sizeof(prev_roots));
|
||||
|
||||
start.hash = cpu_to_le64(hash);
|
||||
start.ino = cpu_to_le64(ino);
|
||||
@@ -892,7 +994,6 @@ retry:
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
memset(&roots.fs_root, 0, sizeof(roots.fs_root));
|
||||
|
||||
end = final;
|
||||
|
||||
@@ -968,16 +1069,10 @@ retry:
|
||||
*done = sre_cmp(&end, &final) == 0;
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_roots, &roots, sizeof(roots)) == 0) {
|
||||
scoutfs_inc_counter(sb, srch_search_stale_eio);
|
||||
ret = -EIO;
|
||||
} else {
|
||||
scoutfs_inc_counter(sb, srch_search_stale_retry);
|
||||
prev_roots = roots;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.srch_root.ref,
|
||||
&roots.logs_root.ref);
|
||||
if (ret == -ESTALE)
|
||||
goto retry;
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -995,6 +1090,9 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
if (sfl->ref.blkno && !force && scoutfs_trigger(sb, SRCH_FORCE_LOG_ROTATE))
|
||||
force = true;
|
||||
|
||||
if (sfl->ref.blkno == 0 ||
|
||||
(!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
|
||||
return 0;
|
||||
@@ -1003,6 +1101,14 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
|
||||
le64_to_cpu(sfl->ref.blkno), 0);
|
||||
ret = scoutfs_btree_insert(sb, alloc, wri, root, &key,
|
||||
sfl, sizeof(*sfl));
|
||||
/*
|
||||
* While it's fine to replay moving the client's logging srch
|
||||
* file to the core btree item, server commits should keep it
|
||||
* from happening. So we'll warn if we see it happen. This can
|
||||
* be removed eventually.
|
||||
*/
|
||||
if (WARN_ON_ONCE(ret == -EEXIST))
|
||||
ret = 0;
|
||||
if (ret == 0) {
|
||||
memset(sfl, 0, sizeof(*sfl));
|
||||
scoutfs_inc_counter(sb, srch_rotate_log);
|
||||
@@ -1462,7 +1568,7 @@ static int kway_merge(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_srch_file *sfl,
|
||||
kway_get_t kway_get, kway_advance_t kway_adv,
|
||||
void **args, int nr)
|
||||
void **args, int nr, bool logs_input)
|
||||
{
|
||||
DECLARE_SRCH_INFO(sb, srinf);
|
||||
struct scoutfs_srch_block *srb = NULL;
|
||||
@@ -1489,8 +1595,7 @@ static int kway_merge(struct super_block *sb,
|
||||
nr_parents = max_t(unsigned long, 1, roundup_pow_of_two(nr) - 1);
|
||||
/* root at [1] for easy sib/parent index calc, final pad for odd sib */
|
||||
nr_nodes = 1 + nr_parents + nr + 1;
|
||||
tnodes = __vmalloc(nr_nodes * sizeof(struct tourn_node),
|
||||
GFP_NOFS, PAGE_KERNEL);
|
||||
tnodes = kc__vmalloc(nr_nodes * sizeof(struct tourn_node), GFP_NOFS);
|
||||
if (!tnodes)
|
||||
return -ENOMEM;
|
||||
|
||||
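For context, newer kernels dropped the pgprot argument from __vmalloc(), which is presumably what the kc__vmalloc() compat wrapper above papers over. The wrapper lives in the kernelcompat layer and isn't part of this diff; a sketch of what such a shim typically looks like (the KC_* guard name here is made up for illustration):

#ifdef KC___VMALLOC_HAS_PGPROT
#define kc__vmalloc(size, gfp)	__vmalloc((size), (gfp), PAGE_KERNEL)
#else
#define kc__vmalloc(size, gfp)	__vmalloc((size), (gfp))
#endif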
@@ -1567,6 +1672,15 @@ static int kway_merge(struct super_block *sb,
|
||||
blk++;
|
||||
}
|
||||
|
||||
/* end sorted block on _SAFE offset for testing */
|
||||
if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
|
||||
scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
|
||||
pad_entries_at_safe(sfl, blk, srb);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
blk++;
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, srch_compact_entry);
|
||||
|
||||
} else {
|
||||
@@ -1609,6 +1723,8 @@ static int kway_merge(struct super_block *sb,
|
||||
empty++;
|
||||
ret = 0;
|
||||
} else if (ret < 0) {
|
||||
if (ret == -ENOANO) /* just testing trigger */
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1692,7 +1808,7 @@ static void swap_page_sre(void *A, void *B, int size)
|
||||
* typically, ~10x worst case).
|
||||
*
|
||||
* Because we read and sort all the input files we must perform the full
|
||||
* compaction in one operation. The server must have given us a
|
||||
* compaction in one operation. The server must have given us
|
||||
* sufficiently large avail/freed lists, otherwise we'll return ENOSPC.
|
||||
*/
|
||||
static int compact_logs(struct super_block *sb,
|
||||
@@ -1747,7 +1863,7 @@ static int compact_logs(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
page->private = 0;
|
||||
list_add_tail(&page->list, &pages);
|
||||
list_add_tail(&page->lru, &pages);
|
||||
nr_pages++;
|
||||
scoutfs_inc_counter(sb, srch_compact_log_page);
|
||||
}
|
||||
@@ -1756,14 +1872,14 @@ static int compact_logs(struct super_block *sb,
|
||||
|
||||
if (pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = decode_entry(srb->entries + pos, sre, &prev);
|
||||
if (ret <= 0) {
|
||||
/* can only be inconsistency :/ */
|
||||
ret = EIO;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
prev = *sre;
|
||||
@@ -1800,7 +1916,7 @@ static int compact_logs(struct super_block *sb,
|
||||
|
||||
/* sort page entries and reset private for _next */
|
||||
i = 0;
|
||||
list_for_each_entry(page, &pages, list) {
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
args[i++] = page;
|
||||
|
||||
if (atomic_read(&srinf->shutdown)) {
|
||||
@@ -1816,12 +1932,12 @@ static int compact_logs(struct super_block *sb,
|
||||
}
|
||||
|
||||
ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_page, kway_adv_page,
|
||||
args, nr_pages);
|
||||
args, nr_pages, true);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* make sure we finished all the pages */
|
||||
list_for_each_entry(page, &pages, list) {
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
sre = page_priv_sre(page);
|
||||
if (page->private < SRES_PER_PAGE && sre->ino != 0) {
|
||||
ret = -ENOSPC;
|
||||
@@ -1834,8 +1950,8 @@ static int compact_logs(struct super_block *sb,
|
||||
out:
|
||||
scoutfs_block_put(sb, bl);
|
||||
vfree(args);
|
||||
list_for_each_entry_safe(page, tmp, &pages, list) {
|
||||
list_del(&page->list);
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
@@ -1874,12 +1990,18 @@ static int kway_get_reader(struct super_block *sb,
|
||||
srb = rdr->bl->data;
|
||||
|
||||
if (rdr->pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip >= SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
|
||||
/* XXX inconsistency */
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (rdr->decoded_bytes == 0 && rdr->pos == SCOUTFS_SRCH_BLOCK_SAFE_BYTES &&
|
||||
scoutfs_trigger(sb, SRCH_MERGE_STOP_SAFE)) {
|
||||
/* only used in testing */
|
||||
return -ENOANO;
|
||||
}
|
||||
|
||||
/* decode entry, possibly skipping start of the block */
|
||||
while (rdr->decoded_bytes == 0 || rdr->pos < rdr->skip) {
|
||||
ret = decode_entry(srb->entries + rdr->pos,
|
||||
@@ -1969,7 +2091,7 @@ static int compact_sorted(struct super_block *sb,
|
||||
}
|
||||
|
||||
ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_reader,
|
||||
kway_adv_reader, args, nr);
|
||||
kway_adv_reader, args, nr, false);
|
||||
|
||||
sc->flags |= SCOUTFS_SRCH_COMPACT_FLAG_DONE;
|
||||
for (i = 0; i < nr; i++) {
|
||||
@@ -2098,8 +2220,15 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* wait 10s between compact attempts on error, immediate after success */
|
||||
#define SRCH_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
static void queue_compact_work(struct srch_info *srinf, bool immediate)
|
||||
{
|
||||
unsigned long delay;
|
||||
|
||||
if (!atomic_read(&srinf->shutdown)) {
|
||||
delay = immediate ? 0 : msecs_to_jiffies(atomic_read(&srinf->compact_delay_ms));
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a compaction operation from the server, sort the entries from the
|
||||
@@ -2127,7 +2256,6 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
struct super_block *sb = srinf->sb;
|
||||
struct scoutfs_block_writer wri;
|
||||
struct scoutfs_alloc alloc;
|
||||
unsigned long delay;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
@@ -2140,6 +2268,8 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
scoutfs_block_writer_init(sb, &wri);
|
||||
|
||||
ret = scoutfs_client_srch_get_compact(sb, sc);
|
||||
if (ret >= 0)
|
||||
trace_scoutfs_srch_compact_client_recv(sb, sc);
|
||||
if (ret < 0 || sc->nr == 0)
|
||||
goto out;
|
||||
|
||||
@@ -2168,6 +2298,7 @@ commit:
|
||||
sc->meta_freed = alloc.freed;
|
||||
sc->flags |= ret < 0 ? SCOUTFS_SRCH_COMPACT_FLAG_ERROR : 0;
|
||||
|
||||
trace_scoutfs_srch_compact_client_send(sb, sc);
|
||||
err = scoutfs_client_srch_commit_compact(sb, sc);
|
||||
if (err < 0 && ret == 0)
|
||||
ret = err;
|
||||
@@ -2178,14 +2309,56 @@ out:
|
||||
scoutfs_inc_counter(sb, srch_compact_error);
|
||||
|
||||
scoutfs_block_writer_forget_all(sb, &wri);
|
||||
if (!atomic_read(&srinf->shutdown)) {
|
||||
delay = ret == 0 ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
|
||||
}
|
||||
queue_compact_work(srinf, sc->nr > 0 && ret == 0);
|
||||
|
||||
kfree(sc);
|
||||
}
|
||||
|
||||
static ssize_t compact_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_SRCH_INFO_KOBJ(kobj, srinf);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", atomic_read(&srinf->compact_delay_ms));
|
||||
}
|
||||
|
||||
#define MIN_COMPACT_DELAY_MS MSEC_PER_SEC
|
||||
#define DEF_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
#define MAX_COMPACT_DELAY_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
static ssize_t compact_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_SRCH_INFO(sb, srinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_COMPACT_DELAY_MS || val > MAX_COMPACT_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid compact_delay_ms value, must be between %lu and %lu",
|
||||
MIN_COMPACT_DELAY_MS, MAX_COMPACT_DELAY_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_set(&srinf->compact_delay_ms, val);
|
||||
cancel_delayed_work(&srinf->compact_dwork);
|
||||
queue_compact_work(srinf, false);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(compact_delay_ms);
|
||||
|
||||
static struct attribute *srch_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(compact_delay_ms),
|
||||
NULL,
|
||||
};
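Taken together, the attribute above gives administrators a runtime knob: writing a millisecond value between MIN_COMPACT_DELAY_MS (1000) and MAX_COMPACT_DELAY_MS (60000) to the srch/compact_delay_ms file updates the delay and reschedules the compaction work with the new value, while out-of-range or unparsable input is rejected with -EINVAL and a logged error. The full sysfs path depends on how scoutfs_sysfs_create_attrs names the per-mount directory, which is outside this diff.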
|
||||
|
||||
void scoutfs_srch_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -2202,6 +2375,8 @@ void scoutfs_srch_destroy(struct super_block *sb)
|
||||
destroy_workqueue(srinf->workq);
|
||||
}
|
||||
|
||||
scoutfs_sysfs_destroy_attrs(sb, &srinf->ssa);
|
||||
|
||||
kfree(srinf);
|
||||
sbi->srch_info = NULL;
|
||||
}
|
||||
@@ -2219,8 +2394,15 @@ int scoutfs_srch_setup(struct super_block *sb)
|
||||
srinf->sb = sb;
|
||||
atomic_set(&srinf->shutdown, 0);
|
||||
INIT_DELAYED_WORK(&srinf->compact_dwork, scoutfs_srch_compact_worker);
|
||||
scoutfs_sysfs_init_attrs(sb, &srinf->ssa);
|
||||
atomic_set(&srinf->compact_delay_ms, DEF_COMPACT_DELAY_MS);
|
||||
|
||||
sbi->srch_info = srinf;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs(sb, &srinf->ssa, srch_attrs, "srch");
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
srinf->workq = alloc_workqueue("scoutfs_srch_compact",
|
||||
WQ_NON_REENTRANT | WQ_UNBOUND |
|
||||
WQ_HIGHPRI, 0);
|
||||
@@ -2229,8 +2411,7 @@ int scoutfs_srch_setup(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork,
|
||||
msecs_to_jiffies(SRCH_COMPACT_DELAY_MS));
|
||||
queue_compact_work(srinf, false);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
|
||||
kmod/src/super.c: 128 changed lines
@@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/magic.h>
|
||||
@@ -47,6 +48,9 @@
|
||||
#include "omap.h"
|
||||
#include "volopt.h"
|
||||
#include "fence.h"
|
||||
#include "xattr.h"
|
||||
#include "wkic.h"
|
||||
#include "quota.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
static struct dentry *scoutfs_debugfs_root;
|
||||
@@ -132,44 +136,6 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
struct super_block *sb = root->d_sb;
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
if (opts->quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
|
||||
seq_printf(seq, ",metadev_path=%s", opts->metadev_path);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t metadev_path_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", opts->metadev_path);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(metadev_path);
|
||||
|
||||
static ssize_t quorum_server_nr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(quorum_server_nr);
|
||||
|
||||
static struct attribute *mount_options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(quorum_server_nr),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int scoutfs_sync_fs(struct super_block *sb, int wait)
|
||||
{
|
||||
trace_scoutfs_sync_fs(sb, wait);
|
||||
@@ -194,7 +160,17 @@ static void scoutfs_metadev_close(struct super_block *sb)
|
||||
* from kill_sb->put_super.
|
||||
*/
|
||||
lockdep_off();
|
||||
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
bdev_fput(sbi->meta_bdev_file);
|
||||
#else
|
||||
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
|
||||
blkdev_put(sbi->meta_bdev, sb);
|
||||
#else
|
||||
blkdev_put(sbi->meta_bdev, SCOUTFS_META_BDEV_MODE);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
lockdep_on();
|
||||
sbi->meta_bdev = NULL;
|
||||
}
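For readers following the ifdef ladder: the branches cover the successive kernel APIs for exclusive block device opens, newest first. When KC_BDEV_FILE_OPEN_BY_PATH is set the metadev was opened with bdev_file_open_by_path() and is released with bdev_fput(); when KC_BLKDEV_PUT_HOLDER_ARG is set blkdev_put() takes the holder that was passed at open time; otherwise the older FMODE-based blkdev_put() is used. The KC_* symbols come from the kernelcompat layer rather than from upstream kernels.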
|
||||
@@ -215,7 +191,7 @@ static void scoutfs_put_super(struct super_block *sb)
|
||||
/*
|
||||
* Wait for invalidation and iput to finish with any lingering
|
||||
* inode references that escaped the evict_inodes in
|
||||
* generic_shutdown_super. MS_ACTIVE is clear so final iput
|
||||
* generic_shutdown_super. SB_ACTIVE is clear so final iput
|
||||
* will always evict.
|
||||
*/
|
||||
scoutfs_lock_flush_invalidate(sb);
|
||||
@@ -230,7 +206,9 @@ static void scoutfs_put_super(struct super_block *sb)
|
||||
scoutfs_shutdown_trans(sb);
|
||||
scoutfs_volopt_destroy(sb);
|
||||
scoutfs_client_destroy(sb);
|
||||
scoutfs_quota_destroy(sb);
|
||||
scoutfs_inode_destroy(sb);
|
||||
scoutfs_wkic_destroy(sb);
|
||||
scoutfs_item_destroy(sb);
|
||||
scoutfs_forest_destroy(sb);
|
||||
scoutfs_data_destroy(sb);
|
||||
@@ -246,13 +224,11 @@ static void scoutfs_put_super(struct super_block *sb)
|
||||
scoutfs_destroy_triggers(sb);
|
||||
scoutfs_fence_destroy(sb);
|
||||
scoutfs_options_destroy(sb);
|
||||
scoutfs_sysfs_destroy_attrs(sb, &sbi->mopts_ssa);
|
||||
debugfs_remove(sbi->debug_root);
|
||||
scoutfs_destroy_counters(sb);
|
||||
scoutfs_destroy_sysfs(sb);
|
||||
scoutfs_metadev_close(sb);
|
||||
|
||||
kfree(sbi->opts.metadev_path);
|
||||
kfree(sbi);
|
||||
|
||||
sb->s_fs_info = NULL;
|
||||
@@ -271,6 +247,8 @@ static void scoutfs_umount_begin(struct super_block *sb)
|
||||
|
||||
scoutfs_warn(sb, "forcing unmount, can return errors and lose unsynced data");
|
||||
sbi->forced_unmount = true;
|
||||
|
||||
scoutfs_client_net_shutdown(sb);
|
||||
}
|
||||
|
||||
static const struct super_operations scoutfs_super_ops = {
|
||||
@@ -280,7 +258,7 @@ static const struct super_operations scoutfs_super_ops = {
|
||||
.destroy_inode = scoutfs_destroy_inode,
|
||||
.sync_fs = scoutfs_sync_fs,
|
||||
.statfs = scoutfs_statfs,
|
||||
.show_options = scoutfs_show_options,
|
||||
.show_options = scoutfs_options_show,
|
||||
.put_super = scoutfs_put_super,
|
||||
.umount_begin = scoutfs_umount_begin,
|
||||
};
|
||||
@@ -498,9 +476,8 @@ static int scoutfs_read_supers(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
sbi->fsid = le64_to_cpu(meta_super->hdr.fsid);
|
||||
sbi->fmt_vers = le64_to_cpu(meta_super->fmt_vers);
|
||||
sbi->super = *meta_super;
|
||||
out:
|
||||
kfree(meta_super);
|
||||
kfree(data_super);
|
||||
@@ -509,9 +486,13 @@ out:
|
||||
|
||||
static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi;
|
||||
struct mount_options opts;
|
||||
struct scoutfs_mount_options opts;
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
struct file *meta_bdev_file;
|
||||
#else
|
||||
struct block_device *meta_bdev;
|
||||
#endif
|
||||
struct scoutfs_sb_info *sbi;
|
||||
struct inode *inode;
|
||||
int ret;
|
||||
|
||||
@@ -520,8 +501,11 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
sb->s_magic = SCOUTFS_SUPER_MAGIC;
|
||||
sb->s_maxbytes = MAX_LFS_FILESIZE;
|
||||
sb->s_op = &scoutfs_super_ops;
|
||||
sb->s_d_op = &scoutfs_dentry_ops;
|
||||
sb->s_export_op = &scoutfs_export_ops;
|
||||
sb->s_flags |= MS_I_VERSION;
|
||||
sb->s_xattr = scoutfs_xattr_handlers;
|
||||
sb->s_flags |= SB_I_VERSION | SB_POSIXACL;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
/* btree blocks use long lived bh->b_data refs */
|
||||
mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);
|
||||
@@ -534,18 +518,17 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
|
||||
ret = assign_random_id(sbi);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto out;
|
||||
|
||||
spin_lock_init(&sbi->next_ino_lock);
|
||||
spin_lock_init(&sbi->data_wait_root.lock);
|
||||
sbi->data_wait_root.root = RB_ROOT;
|
||||
scoutfs_sysfs_init_attrs(sb, &sbi->mopts_ssa);
|
||||
|
||||
ret = scoutfs_parse_options(sb, data, &opts);
|
||||
if (ret)
|
||||
/* parse options early for use during setup */
|
||||
ret = scoutfs_options_early_setup(sb, data);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
sbi->opts = opts;
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
ret = sb_set_blocksize(sb, SCOUTFS_BLOCK_SM_SIZE);
|
||||
if (ret != SCOUTFS_BLOCK_SM_SIZE) {
|
||||
@@ -554,9 +537,27 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
}
|
||||
|
||||
meta_bdev =
|
||||
blkdev_get_by_path(sbi->opts.metadev_path,
|
||||
SCOUTFS_META_BDEV_MODE, sb);
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
/*
|
||||
* pass sbi as holder, since dev_mount already passes sb, which triggers a
|
||||
* WARN_ON because dev_mount also passes non-NULL hops. By passing sbi
|
||||
* here we just get a simple error in our test cases.
|
||||
*/
|
||||
meta_bdev_file = bdev_file_open_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sbi, NULL);
|
||||
if (IS_ERR(meta_bdev_file)) {
|
||||
scoutfs_err(sb, "could not open metadev: error %ld",
|
||||
PTR_ERR(meta_bdev_file));
|
||||
ret = PTR_ERR(meta_bdev_file);
|
||||
goto out;
|
||||
}
|
||||
sbi->meta_bdev_file = meta_bdev_file;
|
||||
sbi->meta_bdev = file_bdev(meta_bdev_file);
|
||||
#else
|
||||
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
|
||||
meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb, NULL);
|
||||
#else
|
||||
meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb);
|
||||
#endif
|
||||
if (IS_ERR(meta_bdev)) {
|
||||
scoutfs_err(sb, "could not open metadev: error %ld",
|
||||
PTR_ERR(meta_bdev));
|
||||
@@ -564,6 +565,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
}
|
||||
sbi->meta_bdev = meta_bdev;
|
||||
#endif
|
||||
|
||||
ret = set_blocksize(sbi->meta_bdev, SCOUTFS_BLOCK_SM_SIZE);
|
||||
if (ret != 0) {
|
||||
scoutfs_err(sb, "failed to set metadev blocksize, returned %d",
|
||||
@@ -576,14 +579,14 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
scoutfs_setup_sysfs(sb) ?:
|
||||
scoutfs_setup_counters(sb) ?:
|
||||
scoutfs_options_setup(sb) ?:
|
||||
scoutfs_sysfs_create_attrs(sb, &sbi->mopts_ssa,
|
||||
mount_options_attrs, "mount_options") ?:
|
||||
scoutfs_setup_triggers(sb) ?:
|
||||
scoutfs_fence_setup(sb) ?:
|
||||
scoutfs_block_setup(sb) ?:
|
||||
scoutfs_forest_setup(sb) ?:
|
||||
scoutfs_item_setup(sb) ?:
|
||||
scoutfs_wkic_setup(sb) ?:
|
||||
scoutfs_inode_setup(sb) ?:
|
||||
scoutfs_quota_setup(sb) ?:
|
||||
scoutfs_data_setup(sb) ?:
|
||||
scoutfs_setup_trans(sb) ?:
|
||||
scoutfs_omap_setup(sb) ?:
|
||||
@@ -599,7 +602,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
|
||||
/* this interruptible iget lets hung mount be aborted with ctl-c */
|
||||
inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO, SCOUTFS_LKF_INTERRUPTIBLE);
|
||||
inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO, SCOUTFS_LKF_INTERRUPTIBLE, 0);
|
||||
if (IS_ERR(inode)) {
|
||||
ret = PTR_ERR(inode);
|
||||
if (ret == -ERESTARTSYS)
|
||||
@@ -650,6 +653,7 @@ static void scoutfs_kill_sb(struct super_block *sb)
|
||||
}
|
||||
|
||||
if (SCOUTFS_HAS_SBI(sb)) {
|
||||
scoutfs_options_stop(sb);
|
||||
scoutfs_inode_orphan_stop(sb);
|
||||
scoutfs_lock_unmount_begin(sb);
|
||||
}
|
||||
@@ -670,7 +674,6 @@ MODULE_ALIAS_FS("scoutfs");
|
||||
static void teardown_module(void)
|
||||
{
|
||||
debugfs_remove(scoutfs_debugfs_root);
|
||||
scoutfs_dir_exit();
|
||||
scoutfs_inode_exit();
|
||||
scoutfs_sysfs_exit();
|
||||
}
|
||||
@@ -708,21 +711,20 @@ static int __init scoutfs_module_init(void)
|
||||
goto out;
|
||||
}
|
||||
ret = scoutfs_inode_init() ?:
|
||||
scoutfs_dir_init() ?:
|
||||
register_filesystem(&scoutfs_fs_type);
|
||||
out:
|
||||
if (ret)
|
||||
teardown_module();
|
||||
return ret;
|
||||
}
|
||||
module_init(scoutfs_module_init)
|
||||
module_init(scoutfs_module_init);
|
||||
|
||||
static void __exit scoutfs_module_exit(void)
|
||||
{
|
||||
unregister_filesystem(&scoutfs_fs_type);
|
||||
teardown_module();
|
||||
}
|
||||
module_exit(scoutfs_module_exit)
|
||||
module_exit(scoutfs_module_exit);
|
||||
|
||||
MODULE_AUTHOR("Zach Brown <zab@versity.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
@@ -30,20 +30,25 @@ struct recov_info;
|
||||
struct omap_info;
|
||||
struct volopt_info;
|
||||
struct fence_info;
|
||||
struct wkic_info;
|
||||
struct squota_info;
|
||||
|
||||
struct scoutfs_sb_info {
|
||||
struct super_block *sb;
|
||||
|
||||
/* assigned once at the start of each mount, read-only */
|
||||
u64 fsid;
|
||||
u64 rid;
|
||||
u64 fmt_vers;
|
||||
|
||||
struct scoutfs_super_block super;
|
||||
|
||||
struct block_device *meta_bdev;
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
struct file *meta_bdev_file;
|
||||
#endif
|
||||
|
||||
spinlock_t next_ino_lock;
|
||||
|
||||
struct options_info *options_info;
|
||||
struct data_info *data_info;
|
||||
struct inode_sb_info *inode_sb_info;
|
||||
struct btree_info *btree_info;
|
||||
@@ -55,6 +60,8 @@ struct scoutfs_sb_info {
|
||||
struct omap_info *omap_info;
|
||||
struct volopt_info *volopt_info;
|
||||
struct item_cache_info *item_cache_info;
|
||||
struct wkic_info *wkic_info;
|
||||
struct squota_info *squota_info;
|
||||
struct fence_info *fence_info;
|
||||
|
||||
/* tracks tasks waiting for data extents */
|
||||
@@ -74,10 +81,6 @@ struct scoutfs_sb_info {
|
||||
struct scoutfs_counters *counters;
|
||||
struct scoutfs_triggers *triggers;
|
||||
|
||||
struct mount_options opts;
|
||||
struct options_sb_info *options;
|
||||
struct scoutfs_sysfs_attrs mopts_ssa;
|
||||
|
||||
struct dentry *debug_root;
|
||||
|
||||
bool forced_unmount;
|
||||
@@ -101,7 +104,11 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)
|
||||
return !!(le64_to_cpu(super_block->flags) & SCOUTFS_FLAG_IS_META_BDEV);
|
||||
}
|
||||
|
||||
#ifdef KC_HAVE_BLK_MODE_T
|
||||
#define SCOUTFS_META_BDEV_MODE (BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL)
|
||||
#else
|
||||
#define SCOUTFS_META_BDEV_MODE (FMODE_READ | FMODE_WRITE | FMODE_EXCL)
|
||||
#endif
|
||||
|
||||
static inline bool scoutfs_forcing_unmount(struct super_block *sb)
|
||||
{
|
||||
@@ -138,14 +145,14 @@ static inline bool scoutfs_unmounting(struct super_block *sb)
|
||||
(int)(le64_to_cpu(fsid) >> SCSB_SHIFT), \
|
||||
(int)(le64_to_cpu(rid) >> SCSB_SHIFT)
|
||||
#define SCSB_ARGS(sb) \
|
||||
(int)(le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) >> SCSB_SHIFT), \
|
||||
(int)(SCOUTFS_SB(sb)->fsid >> SCSB_SHIFT), \
|
||||
(int)(SCOUTFS_SB(sb)->rid >> SCSB_SHIFT)
|
||||
#define SCSB_TRACE_FIELDS \
|
||||
__field(__u64, fsid) \
|
||||
__field(__u64, rid)
|
||||
#define SCSB_TRACE_ASSIGN(sb) \
|
||||
__entry->fsid = SCOUTFS_HAS_SBI(sb) ? \
|
||||
le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) : 0;\
|
||||
SCOUTFS_SB(sb)->fsid : 0; \
|
||||
__entry->rid = SCOUTFS_HAS_SBI(sb) ? \
|
||||
SCOUTFS_SB(sb)->rid : 0;
|
||||
#define SCSB_TRACE_ARGS \
|
||||
@@ -160,4 +167,17 @@ int scoutfs_write_super(struct super_block *sb,
|
||||
/* to keep this out of the ioctl.h public interface definition */
|
||||
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
|
||||
/*
|
||||
* Returns 0 when supported, non-zero -errno when unsupported.
|
||||
*/
|
||||
static inline int scoutfs_fmt_vers_unsupported(struct super_block *sb, u64 vers)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (sbi && (sbi->fmt_vers < vers))
|
||||
return -EOPNOTSUPP;
|
||||
else
|
||||
return 0;
|
||||
}
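A sketch of the intended feature gating call pattern for this helper; the version constant named here is made up for illustration, real callers would pass whatever format version constant the feature in question was introduced with:

	/* refuse the operation when the mount's format version predates the feature */
	ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERS_EXAMPLE_FEATURE);
	if (ret < 0)
		return ret;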
|
||||
|
||||
#endif
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkdev.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "sysfs.h"
|
||||
@@ -37,6 +38,15 @@ struct attr_funcs {
|
||||
#define ATTR_FUNCS_RO(_name) \
|
||||
static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)
|
||||
|
||||
static ssize_t data_device_maj_min_show(struct kobject *kobj, struct attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u:%u\n",
|
||||
MAJOR(sb->s_bdev->bd_dev), MINOR(sb->s_bdev->bd_dev));
|
||||
}
|
||||
ATTR_FUNCS_RO(data_device_maj_min);
|
||||
|
||||
static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
@@ -51,10 +61,9 @@ static ssize_t fsid_show(struct kobject *kobj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx\n",
|
||||
le64_to_cpu(super->hdr.fsid));
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx\n", sbi->fsid);
|
||||
}
|
||||
ATTR_FUNCS_RO(fsid);
|
||||
|
||||
@@ -101,6 +110,7 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,
|
||||
|
||||
|
||||
static struct attribute *sb_id_attrs[] = {
|
||||
&data_device_maj_min_attr_funcs.attr,
|
||||
&format_version_attr_funcs.attr,
|
||||
&fsid_attr_funcs.attr,
|
||||
&rid_attr_funcs.attr,
|
||||
@@ -258,7 +268,7 @@ int __init scoutfs_sysfs_init(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __exit scoutfs_sysfs_exit(void)
|
||||
void scoutfs_sysfs_exit(void)
|
||||
{
|
||||
if (scoutfs_kset)
|
||||
kset_unregister(scoutfs_kset);
|
||||
|
||||
@@ -53,6 +53,6 @@ int scoutfs_setup_sysfs(struct super_block *sb);
|
||||
void scoutfs_destroy_sysfs(struct super_block *sb);
|
||||
|
||||
int __init scoutfs_sysfs_init(void);
|
||||
void __exit scoutfs_sysfs_exit(void);
|
||||
void scoutfs_sysfs_exit(void);
|
||||
|
||||
#endif
|
||||
|
||||
kmod/src/totl.c: new file, 90 lines
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Copyright (C) 2023 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "forest.h"
|
||||
#include "totl.h"
|
||||
|
||||
void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end)
|
||||
{
|
||||
scoutfs_key_set_zeros(start);
|
||||
start->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
scoutfs_key_set_ones(end);
|
||||
end->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
}
|
||||
|
||||
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg)
|
||||
{
|
||||
memset(merg, 0, sizeof(struct scoutfs_totl_merging));
|
||||
}
|
||||
|
||||
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
|
||||
u64 seq, u8 flags, void *val, int val_len, int fic)
|
||||
{
|
||||
struct scoutfs_xattr_totl_val *tval = val;
|
||||
|
||||
if (fic & FIC_FS_ROOT) {
|
||||
merg->fs_seq = seq;
|
||||
merg->fs_total = le64_to_cpu(tval->total);
|
||||
merg->fs_count = le64_to_cpu(tval->count);
|
||||
} else if (fic & FIC_FINALIZED) {
|
||||
merg->fin_seq = seq;
|
||||
merg->fin_total += le64_to_cpu(tval->total);
|
||||
merg->fin_count += le64_to_cpu(tval->count);
|
||||
} else {
|
||||
merg->log_seq = seq;
|
||||
merg->log_total += le64_to_cpu(tval->total);
|
||||
merg->log_count += le64_to_cpu(tval->count);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * .totl. item merging has to be careful because the log btree merging
 * code can write partial results to the fs_root. This means that a
 * reader can see both cases where new finalized logs should be applied
 * to the old fs items and where old finalized logs have already been
 * applied to the partially merged fs items. Currently active logged
 * items are always applied on top of all cases.
 *
 * These cases are differentiated with a combination of sequence numbers
 * in items, the count of contributing xattrs, and a flag
 * differentiating finalized and active logged items. This lets us
 * recognize all cases, including when finalized logs were merged and
 * deleted the fs item.
 */
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count)
|
||||
{
|
||||
*total = 0;
|
||||
*count = 0;
|
||||
|
||||
/* start with the fs item if we have it */
|
||||
if (merg->fs_seq != 0) {
|
||||
*total = merg->fs_total;
|
||||
*count = merg->fs_count;
|
||||
}
|
||||
|
||||
/* apply finalized logs if they're newer or creating */
|
||||
if (((merg->fs_seq != 0) && (merg->fin_seq > merg->fs_seq)) ||
|
||||
((merg->fs_seq == 0) && (merg->fin_count > 0))) {
|
||||
*total += merg->fin_total;
|
||||
*count += merg->fin_count;
|
||||
}
|
||||
|
||||
/* always apply active logs which must be newer than fs and finalized */
|
||||
if (merg->log_seq > 0) {
|
||||
*total += merg->log_total;
|
||||
*count += merg->log_count;
|
||||
}
|
||||
}
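A minimal sketch of how a caller is expected to drive this merge API while walking the .totl. items it finds across the fs root, finalized log btrees, and active log btrees; the item walking loop and its local variables are hypothetical, only the scoutfs_totl_* calls and the merging struct come from this file:

	struct scoutfs_totl_merging merg;
	__u64 total;
	__u64 count;

	scoutfs_totl_merge_init(&merg);

	/* once per version of the item seen; fic flags say which btree it came from */
	scoutfs_totl_merge_contribute(&merg, item_seq, item_flags, item_val, item_val_len, fic);

	/* collapse the fs/finalized/active cases into the externally visible totals */
	scoutfs_totl_merge_resolve(&merg, &total, &count);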
|
||||
kmod/src/totl.h: new file, 24 lines
@@ -0,0 +1,24 @@
|
||||
#ifndef _SCOUTFS_TOTL_H_
|
||||
#define _SCOUTFS_TOTL_H_
|
||||
|
||||
#include "key.h"
|
||||
|
||||
struct scoutfs_totl_merging {
|
||||
u64 fs_seq;
|
||||
u64 fs_total;
|
||||
u64 fs_count;
|
||||
u64 fin_seq;
|
||||
u64 fin_total;
|
||||
s64 fin_count;
|
||||
u64 log_seq;
|
||||
u64 log_total;
|
||||
s64 log_count;
|
||||
};
|
||||
|
||||
void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end);
|
||||
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg);
|
||||
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
|
||||
u64 seq, u8 flags, void *val, int val_len, int fic);
|
||||
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count);
|
||||
|
||||
#endif
|
||||
kmod/src/trace/quota.h: new file, 143 lines
@@ -0,0 +1,143 @@
|
||||
|
||||
/*
|
||||
* Tracing squota_input
|
||||
*/
|
||||
#define SQI_FMT "[%u %llu %llu %llu]"
|
||||
|
||||
#define SQI_ARGS(i) \
|
||||
(i)->op, (i)->attrs[0], (i)->attrs[1], (i)->attrs[2]
|
||||
|
||||
#define SQI_FIELDS(pref) \
|
||||
__array(__u64, pref##_attrs, SQ_NS__NR_SELECT) \
|
||||
__field(__u8, pref##_op)
|
||||
|
||||
#define SQI_ASSIGN(pref, i) \
|
||||
__entry->pref##_attrs[0] = (i)->attrs[0]; \
|
||||
__entry->pref##_attrs[1] = (i)->attrs[1]; \
|
||||
__entry->pref##_attrs[2] = (i)->attrs[2]; \
|
||||
__entry->pref##_op = (i)->op;
|
||||
|
||||
#define SQI_ENTRY_ARGS(pref) \
|
||||
__entry->pref##_op, __entry->pref##_attrs[0], \
|
||||
__entry->pref##_attrs[1], __entry->pref##_attrs[2]
|
||||
|
||||
/*
|
||||
* Tracing squota_rule
|
||||
*/
|
||||
#define SQR_FMT "[%u %llu,%u,%x %llu,%u,%x %llu,%u,%x %u %llu]"
|
||||
|
||||
#define SQR_ARGS(r) \
|
||||
(r)->prio, \
|
||||
(r)->name_val[0], (r)->name_source[0], (r)->name_flags[0], \
|
||||
(r)->name_val[1], (r)->name_source[1], (r)->name_flags[1], \
|
||||
(r)->name_val[2], (r)->name_source[2], (r)->name_flags[2], \
|
||||
(r)->op, (r)->limit \
|
||||
|
||||
#define SQR_FIELDS(pref) \
|
||||
__array(__u64, pref##_name_val, 3) \
|
||||
__field(__u64, pref##_limit) \
|
||||
__array(__u8, pref##_name_source, 3) \
|
||||
__array(__u8, pref##_name_flags, 3) \
|
||||
__field(__u8, pref##_prio) \
|
||||
__field(__u8, pref##_op)
|
||||
|
||||
#define SQR_ASSIGN(pref, r) \
|
||||
__entry->pref##_name_val[0] = (r)->names[0].val; \
|
||||
__entry->pref##_name_val[1] = (r)->names[1].val; \
|
||||
__entry->pref##_name_val[2] = (r)->names[2].val; \
|
||||
__entry->pref##_limit = (r)->limit; \
|
||||
__entry->pref##_name_source[0] = (r)->names[0].source; \
|
||||
__entry->pref##_name_source[1] = (r)->names[1].source; \
|
||||
__entry->pref##_name_source[2] = (r)->names[2].source; \
|
||||
__entry->pref##_name_flags[0] = (r)->names[0].flags; \
|
||||
__entry->pref##_name_flags[1] = (r)->names[1].flags; \
|
||||
__entry->pref##_name_flags[2] = (r)->names[2].flags; \
|
||||
__entry->pref##_prio = (r)->prio; \
|
||||
__entry->pref##_op = (r)->op;
|
||||
|
||||
#define SQR_ENTRY_ARGS(pref) \
|
||||
__entry->pref##_prio, __entry->pref##_name_val[0], \
|
||||
__entry->pref##_name_source[0], __entry->pref##_name_flags[0], \
|
||||
__entry->pref##_name_val[1], __entry->pref##_name_source[1], \
|
||||
__entry->pref##_name_flags[1], __entry->pref##_name_val[2], \
|
||||
__entry->pref##_name_source[2], __entry->pref##_name_flags[2], \
|
||||
__entry->pref##_op, __entry->pref##_limit
|
||||
|
||||
TRACE_EVENT(scoutfs_quota_check,
|
||||
TP_PROTO(struct super_block *sb, long rs_ptr, struct squota_input *inp, int ret),
|
||||
|
||||
TP_ARGS(sb, rs_ptr, inp, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(long, rs_ptr)
|
||||
SQI_FIELDS(i)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->rs_ptr = rs_ptr;
|
||||
SQI_ASSIGN(i, inp);
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rs_ptr %ld ret %d inp "SQI_FMT,
|
||||
SCSB_TRACE_ARGS, __entry->rs_ptr, __entry->ret, SQI_ENTRY_ARGS(i))
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_quota_rule_op_class,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
|
||||
TP_ARGS(sb, rule, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
SQR_FIELDS(r)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
SQR_ASSIGN(r, rule);
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" "SQR_FMT" ret %d",
|
||||
SCSB_TRACE_ARGS, SQR_ENTRY_ARGS(r), __entry->ret)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_add_rule,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
TP_ARGS(sb, rule, ret)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_del_rule,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
TP_ARGS(sb, rule, ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_quota_totl_check,
|
||||
TP_PROTO(struct super_block *sb, struct squota_input *inp, struct scoutfs_key *key,
|
||||
u64 limit, int ret),
|
||||
|
||||
TP_ARGS(sb, inp, key, limit, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
SQI_FIELDS(i)
|
||||
sk_trace_define(k)
|
||||
__field(__u64, limit)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
SQI_ASSIGN(i, inp);
|
||||
sk_trace_assign(k, key);
|
||||
__entry->limit = limit;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" inp "SQI_FMT" key "SK_FMT" limit %llu ret %d",
|
||||
SCSB_TRACE_ARGS, SQI_ENTRY_ARGS(i), sk_trace_args(k), __entry->limit,
|
||||
__entry->ret)
|
||||
);
|
||||
112
kmod/src/trace/wkic.h
Normal file
@@ -0,0 +1,112 @@
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_wkic_wpage_class,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, ptr)
|
||||
__field(int, which)
|
||||
__field(bool, n0l)
|
||||
__field(bool, n1l)
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ptr = ptr;
|
||||
__entry->which = which;
|
||||
__entry->n0l = n0l;
|
||||
__entry->n1l = n1l;
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ptr %p wh %d nl %u,%u start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
|
||||
__entry->ptr, __entry->which, __entry->n0l, __entry->n1l,
|
||||
sk_trace_args(start), sk_trace_args(end))
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_alloced,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_freeing,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_found,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_trimmed,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_erased,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserting,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserted,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_shrinking,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_dropping,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_replaying,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_filled,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_wkic_read_items,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *start,
|
||||
struct scoutfs_key *end),
|
||||
|
||||
TP_ARGS(sb, key, start, end),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
sk_trace_define(key)
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
sk_trace_assign(key, key);
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" key "SK_FMT" start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
|
||||
sk_trace_args(key), sk_trace_args(start), sk_trace_args(end))
|
||||
);
|
||||
@@ -159,6 +159,58 @@ static bool drained_holders(struct trans_info *tri)
|
||||
return holders == 0;
|
||||
}
|
||||
|
||||
static int commit_current_log_trees(struct super_block *sb, char **str)
|
||||
{
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
return (*str = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
|
||||
(*str = "item dirty", scoutfs_item_write_dirty(sb)) ?:
|
||||
(*str = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
|
||||
(*str = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri)) ?:
|
||||
(*str = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
|
||||
(*str = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
|
||||
(*str = "commit log trees", commit_btrees(sb)) ?:
|
||||
scoutfs_item_write_done(sb);
|
||||
}
|
||||
|
||||
static int get_next_log_trees(struct super_block *sb, char **str)
|
||||
{
|
||||
return (*str = "get log trees", scoutfs_trans_get_log_trees(sb));
|
||||
}
|
||||
|
||||
static int retry_forever(struct super_block *sb, int (*func)(struct super_block *sb, char **str))
|
||||
{
|
||||
bool retrying = false;
|
||||
char *str;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
str = NULL;
|
||||
|
||||
ret = func(sb, &str);
|
||||
if (ret < 0) {
|
||||
if (!retrying) {
|
||||
scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
|
||||
str, ret);
|
||||
retrying = true;
|
||||
}
|
||||
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -ENOLINK;
|
||||
break;
|
||||
}
|
||||
|
||||
msleep(2 * MSEC_PER_SEC);
|
||||
|
||||
} else if (retrying) {
|
||||
scoutfs_info(sb, "retried transaction commit succeeded");
|
||||
}
|
||||
|
||||
} while (ret < 0);
|
||||
|
||||
return ret;
|
||||
}
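The two helpers keep the commit path's labelled-step style: each step is a comma expression that stores a human-readable label before making the call, and the steps are chained with the GCC "a ?: b" extension so the first nonzero return stops the chain with its label already recorded for the warning message. A stand-alone sketch of the same pattern, with made-up step functions, is:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -5; }	/* pretend this step fails */
static int step_c(void) { return 0; }

/* run the steps in order, remembering which one failed via *str */
static int run_steps(char **str)
{
	/* (x, y) evaluates x for its side effect and yields y; the GCC
	 * extension a ?: b means "a ? a : b", so the first nonzero
	 * return short-circuits the rest of the chain */
	return (*str = "step a", step_a()) ?:
	       (*str = "step b", step_b()) ?:
	       (*str = "step c", step_c());
}

int main(void)
{
	char *str = NULL;
	int ret = run_steps(&str);

	if (ret)
		printf("failed at %s: %d\n", str, ret);
	return 0;
}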
|
||||
|
||||
/*
|
||||
* This work func is responsible for writing out all the dirty blocks
|
||||
* that make up the current dirty transaction. It prevents writers from
|
||||
@@ -184,8 +236,6 @@ void scoutfs_trans_write_func(struct work_struct *work)
|
||||
struct trans_info *tri = container_of(work, struct trans_info, write_work.work);
|
||||
struct super_block *sb = tri->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
bool retrying = false;
|
||||
char *s = NULL;
|
||||
int ret = 0;
|
||||
|
||||
tri->task = current;
|
||||
@@ -202,7 +252,7 @@ void scoutfs_trans_write_func(struct work_struct *work)
|
||||
}
|
||||
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -EIO;
|
||||
ret = -ENOLINK;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -214,37 +264,9 @@ void scoutfs_trans_write_func(struct work_struct *work)
|
||||
|
||||
scoutfs_inc_counter(sb, trans_commit_written);
|
||||
|
||||
do {
|
||||
ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
|
||||
(s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
|
||||
(s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
|
||||
(s = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc,
|
||||
&tri->wri)) ?:
|
||||
(s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
|
||||
(s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
|
||||
(s = "commit log trees", commit_btrees(sb)) ?:
|
||||
scoutfs_item_write_done(sb) ?:
|
||||
(s = "get log trees", scoutfs_trans_get_log_trees(sb));
|
||||
if (ret < 0) {
|
||||
if (!retrying) {
|
||||
scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
|
||||
s, ret);
|
||||
retrying = true;
|
||||
}
|
||||
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
msleep(2 * MSEC_PER_SEC);
|
||||
|
||||
} else if (retrying) {
|
||||
scoutfs_info(sb, "retried transaction commit succeeded");
|
||||
}
|
||||
|
||||
} while (ret < 0);
|
||||
|
||||
/* retry {commit,get}_log_trees until they succeed; they can only fail when forcing unmount */
|
||||
ret = retry_forever(sb, commit_current_log_trees) ?:
|
||||
retry_forever(sb, get_next_log_trees);
|
||||
out:
|
||||
spin_lock(&tri->write_lock);
|
||||
tri->write_count++;
|
||||
@@ -640,6 +662,7 @@ void scoutfs_shutdown_trans(struct super_block *sb)
|
||||
tri->write_workq = NULL;
|
||||
}
|
||||
|
||||
scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri);
|
||||
scoutfs_block_writer_forget_all(sb, &tri->wri);
|
||||
|
||||
kfree(tri);
|
||||
|
||||
@@ -39,6 +39,9 @@ struct scoutfs_triggers {
|
||||
|
||||
static char *names[] = {
|
||||
[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
|
||||
[SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
|
||||
[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
|
||||
[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
|
||||
[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
|
||||
};
|
||||
|
||||
@@ -90,13 +93,9 @@ int scoutfs_setup_triggers(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++) {
|
||||
if (!debugfs_create_atomic_t(names[i], 0644, triggers->dir,
|
||||
&triggers->atomics[i])) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++)
|
||||
debugfs_create_atomic_t(names[i], 0644, triggers->dir,
|
||||
&triggers->atomics[i]);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
|
||||
@@ -3,6 +3,9 @@
|
||||
|
||||
enum scoutfs_trigger {
|
||||
SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
|
||||
SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
|
||||
SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
|
||||
SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
|
||||
SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
|
||||
SCOUTFS_TRIGGER_NR,
|
||||
};
|
||||
|
||||
@@ -46,6 +46,23 @@ static struct scoutfs_tseq_entry *tseq_rb_next(struct scoutfs_tseq_entry *ent)
|
||||
return rb_entry(node, struct scoutfs_tseq_entry, node);
|
||||
}
|
||||
|
||||
#ifdef KC_RB_TREE_AUGMENTED_COMPUTE_MAX
|
||||
static bool tseq_compute_total(struct scoutfs_tseq_entry *ent, bool exit)
|
||||
{
|
||||
loff_t total = 1 + tseq_node_total(ent->node.rb_left) +
|
||||
tseq_node_total(ent->node.rb_right);
|
||||
|
||||
if (exit && ent->total == total)
|
||||
return true;
|
||||
|
||||
ent->total = total;
|
||||
return false;
|
||||
}
|
||||
|
||||
RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
|
||||
node, total, tseq_compute_total);
|
||||
#else
|
||||
|
||||
static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
return 1 + tseq_node_total(ent->node.rb_left) +
|
||||
@@ -53,7 +70,8 @@ static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
|
||||
}
|
||||
|
||||
RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
|
||||
node, loff_t, total, tseq_compute_total)
|
||||
node, loff_t, total, tseq_compute_total);
|
||||
#endif
|
||||
|
||||
void scoutfs_tseq_tree_init(struct scoutfs_tseq_tree *tree,
|
||||
scoutfs_tseq_show_t show)
|
||||
@@ -165,6 +183,13 @@ static void *scoutfs_tseq_seq_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
ent = tseq_rb_next(ent);
|
||||
if (ent)
|
||||
*pos = ent->pos;
|
||||
else
|
||||
/*
|
||||
* once we hit the end, *pos is never used, but it has to
|
||||
* be updated to avoid an error in bpf_seq_read()
|
||||
*/
|
||||
(*pos)++;
|
||||
|
||||
return ent;
|
||||
}
|
||||
|
||||
|
||||
@@ -17,4 +17,15 @@ static inline void down_write_two(struct rw_semaphore *a,
|
||||
down_write_nested(b, SINGLE_DEPTH_NESTING);
|
||||
}
|
||||
|
||||
/*
* When returning shrinker counts from scan_objects, we should steer
* clear of the magic SHRINK_STOP and SHRINK_EMPTY values, which are near
* ~0UL. Hence we cap the count at LONG_MAX, which is arbitrarily high
* enough to avoid them.
*/
|
||||
static inline long shrinker_min_long(long count)
|
||||
{
|
||||
return min(count, LONG_MAX);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
1160
kmod/src/wkic.c
Normal file
File diff suppressed because it is too large
19
kmod/src/wkic.h
Normal file
@@ -0,0 +1,19 @@
|
||||
#ifndef _SCOUTFS_WKIC_H_
|
||||
#define _SCOUTFS_WKIC_H_
|
||||
|
||||
#include "format.h"
|
||||
|
||||
typedef int (*wkic_iter_cb_t)(struct scoutfs_key *key, void *val, unsigned int val_len,
|
||||
void *cb_arg);
|
||||
|
||||
int scoutfs_wkic_iterate(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *last,
|
||||
struct scoutfs_key *range_start, struct scoutfs_key *range_end,
|
||||
wkic_iter_cb_t cb, void *cb_arg);
|
||||
int scoutfs_wkic_iterate_stable(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *last, struct scoutfs_key *range_start,
|
||||
struct scoutfs_key *range_end, wkic_iter_cb_t cb, void *cb_arg);
|
||||
|
||||
int scoutfs_wkic_setup(struct super_block *sb);
|
||||
void scoutfs_wkic_destroy(struct super_block *sb);
|
||||
|
||||
#endif
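To make the header's callback typedef concrete, a rough caller sketch follows. The callback convention (return 0 to keep iterating) and the relationship between the key/last and range_start/range_end arguments are assumptions made for illustration, not details taken from this patch:

/* count cached items between two keys; illustrative only */
static int count_item_cb(struct scoutfs_key *key, void *val, unsigned int val_len,
			 void *cb_arg)
{
	u64 *count = cb_arg;

	(*count)++;
	return 0;		/* assumed: nonzero would stop iteration */
}

static u64 count_cached_items(struct super_block *sb, struct scoutfs_key *first,
			      struct scoutfs_key *last)
{
	u64 count = 0;

	/* assumed: iterate keys in [first, last] within the same cached range */
	scoutfs_wkic_iterate(sb, first, last, first, last, count_item_cb, &count);
	return count;
}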
|
||||
845
kmod/src/xattr.c
File diff suppressed because it is too large
@@ -1,29 +1,39 @@
|
||||
#ifndef _SCOUTFS_XATTR_H_
|
||||
#define _SCOUTFS_XATTR_H_
|
||||
|
||||
ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
|
||||
size_t size);
|
||||
int scoutfs_setxattr(struct dentry *dentry, const char *name,
|
||||
const void *value, size_t size, int flags);
|
||||
int scoutfs_removexattr(struct dentry *dentry, const char *name);
|
||||
struct scoutfs_xattr_prefix_tags {
|
||||
unsigned long hide:1,
|
||||
indx:1,
|
||||
srch:1,
|
||||
totl:1;
|
||||
};
|
||||
|
||||
extern const struct xattr_handler *scoutfs_xattr_handlers[];
|
||||
|
||||
int scoutfs_xattr_get_locked(struct inode *inode, const char *name, void *buffer, size_t size,
|
||||
struct scoutfs_lock *lck);
|
||||
int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_len,
|
||||
const void *value, size_t size, int flags,
|
||||
const struct scoutfs_xattr_prefix_tags *tgs,
|
||||
struct scoutfs_lock *lck, struct scoutfs_lock *totl_lock,
|
||||
struct list_head *ind_locks);
|
||||
|
||||
ssize_t scoutfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
|
||||
ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
|
||||
size_t size, __u32 *hash_pos, __u64 *id_pos,
|
||||
bool e_range, bool show_hidden);
|
||||
|
||||
int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
|
||||
struct scoutfs_lock *lock);
|
||||
|
||||
struct scoutfs_xattr_prefix_tags {
|
||||
unsigned long hide:1,
|
||||
srch:1,
|
||||
totl:1;
|
||||
};
|
||||
|
||||
int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
|
||||
struct scoutfs_xattr_prefix_tags *tgs);
|
||||
|
||||
void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name);
|
||||
int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len);
|
||||
|
||||
void scoutfs_xattr_indx_get_range(struct scoutfs_key *start, struct scoutfs_key *end);
|
||||
void scoutfs_xattr_init_indx_key(struct scoutfs_key *key, u8 major, u64 minor, u64 ino, u64 xid);
|
||||
void scoutfs_xattr_get_indx_key(struct scoutfs_key *key, u8 *major, u64 *minor, u64 *ino, u64 *xid);
|
||||
void scoutfs_xattr_set_indx_key_xid(struct scoutfs_key *key, u64 xid);
|
||||
|
||||
#endif
|
||||
|
||||
6
tests/.gitignore
vendored
@@ -1,8 +1,14 @@
|
||||
src/*.d
|
||||
src/createmany
|
||||
src/dumb_renameat2
|
||||
src/dumb_setxattr
|
||||
src/handle_cat
|
||||
src/handle_fsetxattr
|
||||
src/bulk_create_paths
|
||||
src/find_xattrs
|
||||
src/stage_tmpfile
|
||||
src/create_xattr_loop
|
||||
src/o_tmpfile_umask
|
||||
src/o_tmpfile_linkat
|
||||
src/mmap_stress
|
||||
src/mmap_validate
|
||||
|
||||
1
tests/.xfstests-branch
Normal file
@@ -0,0 +1 @@
|
||||
v2022.05.01-2-g787cd20
|
||||
@@ -3,12 +3,19 @@ SHELL := /usr/bin/bash
|
||||
|
||||
# each binary command is built from a single .c file
|
||||
BIN := src/createmany \
|
||||
src/dumb_renameat2 \
|
||||
src/dumb_setxattr \
|
||||
src/handle_cat \
|
||||
src/handle_fsetxattr \
|
||||
src/bulk_create_paths \
|
||||
src/stage_tmpfile \
|
||||
src/find_xattrs \
|
||||
src/create_xattr_loop
|
||||
src/create_xattr_loop \
|
||||
src/fragmented_data_extents \
|
||||
src/o_tmpfile_umask \
|
||||
src/o_tmpfile_linkat \
|
||||
src/mmap_stress \
|
||||
src/mmap_validate
|
||||
|
||||
DEPS := $(wildcard src/*.d)
|
||||
|
||||
@@ -18,8 +25,10 @@ ifneq ($(DEPS),)
|
||||
-include $(DEPS)
|
||||
endif
|
||||
|
||||
src/mmap_stress: LIBS+=-lpthread
|
||||
|
||||
$(BIN): %: %.c Makefile
|
||||
gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@
|
||||
gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@ $(LIBS)
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
|
||||
@@ -25,8 +25,9 @@ All options can be seen by running with -h.
|
||||
This script is built to test multi-node systems on one host by using
|
||||
different mounts of the same devices. The script creates a fake block
|
||||
device in front of each fs block device for each mount that will be
|
||||
tested. Currently it will create free loop devices and will mount on
|
||||
/mnt/test.[0-9].
|
||||
tested. It will create predictable device mapper devices and mount
them on /mnt/test.N. These static device names and mount paths limit
the script to a single execution per host.
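The exact tables the harness builds are not shown in this diff, but the usual way to get such a predictable name is a linear device-mapper target over the backing device, roughly as sketched here (the device names and table are illustrative):

# map all of /dev/vda through a stable /dev/mapper name
size=$(blockdev --getsz /dev/vda)	# size in 512-byte sectors
dmsetup create _scoutfs_test_meta_0 --table "0 $size linear /dev/vda 0"
ls -l /dev/mapper/_scoutfs_test_meta_0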
|
||||
|
||||
All tests will be run by default. Particular tests can be included or
|
||||
excluded by providing test name regular expressions with the -I and -E
|
||||
@@ -104,14 +105,15 @@ used during the test.
|
||||
|
||||
| Variable | Description | Origin | Example |
|
||||
| ---------------- | ------------------- | --------------- | ----------------- |
|
||||
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/loop0 |
|
||||
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/loop1 |
|
||||
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/mapper/\_scoutfs\_test\_meta\_[0-9] |
|
||||
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/mapper/\_scoutfs\_test\_data\_[0-9] |
|
||||
| T\_D[0-9] | per-mount test dir | made for test | /mnt/test.[0-9]/t |
|
||||
| T\_META\_DEVICE | main FS meta bdev | -M | /dev/vda |
|
||||
| T\_DATA\_DEVICE | main FS data bdev | -D | /dev/vdb |
|
||||
| T\_EX\_META\_DEV | scratch meta bdev | -f | /dev/vdd |
|
||||
| T\_EX\_DATA\_DEV | scratch meta bdev | -e | /dev/vdc |
|
||||
| T\_M[0-9] | mount paths | mounted per run | /mnt/test.[0-9]/ |
|
||||
| T\_MODULE | built kernel module | created per run | ../kmod/src/..ko |
|
||||
| T\_NR\_MOUNTS | number of mounts | -n | 3 |
|
||||
| T\_O[0-9] | mount options | created per run | -o server\_addr= |
|
||||
| T\_QUORUM | quorum count | -q | 2 |
|
||||
|
||||
43
tests/fenced-local-force-unmount.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
#
|
||||
# This fencing script is used for testing clusters of multiple mounts on
|
||||
# a single host. It finds mounts to fence by looking for their rids and
|
||||
# only knows how to "fence" by using forced unmount.
|
||||
#
|
||||
|
||||
echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"
|
||||
|
||||
log() {
echo "$@" > /dev/stderr
}
|
||||
|
||||
echo_fail() {
|
||||
echo "$@" > /dev/stderr
|
||||
exit 1
|
||||
}
|
||||
|
||||
rid="$SCOUTFS_FENCED_REQ_RID"
|
||||
|
||||
for fs in /sys/fs/scoutfs/*; do
|
||||
[ ! -d "$fs" ] && continue
|
||||
|
||||
fs_rid="$(cat $fs/rid)" || \
|
||||
echo_fail "failed to get rid in $fs"
|
||||
if [ "$fs_rid" != "$rid" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
nr="$(cat $fs/data_device_maj_min)" || \
|
||||
echo_fail "failed to get data device major:minor in $fs"
|
||||
|
||||
mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
|
||||
echo_fail "findmnt -t scoutfs -S $nr failed"
|
||||
for mnt in $mnts; do
|
||||
umount -f "$mnt" || \
|
||||
echo_fail "umout -f $mnt failed"
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
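For manual testing, the script can be run the same way the fenced daemon would invoke it, with the fence request described in the environment (the rid value below is made up):

SCOUTFS_FENCED_REQ_RID=9a3f04c2deadbeef \
SCOUTFS_FENCED_REQ_IP=127.0.0.1 \
	./tests/fenced-local-force-unmount.sh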
|
||||
@@ -7,8 +7,9 @@ t_status_msg()
|
||||
export T_PASS_STATUS=100
|
||||
export T_SKIP_STATUS=101
|
||||
export T_FAIL_STATUS=102
|
||||
export T_SKIP_PERMITTED_STATUS=103
|
||||
export T_FIRST_STATUS="$T_PASS_STATUS"
|
||||
export T_LAST_STATUS="$T_FAIL_STATUS"
|
||||
export T_LAST_STATUS="$T_SKIP_PERMITTED_STATUS"
|
||||
|
||||
t_pass()
|
||||
{
|
||||
@@ -21,6 +22,17 @@ t_skip()
|
||||
exit $T_SKIP_STATUS
|
||||
}
|
||||
|
||||
#
|
||||
# This exit code is *reserved* for tests that are up-front never going to work
|
||||
# in certain cases. This should be expressly documented per-case and made
|
||||
# abundantly clear before merging. The test itself should document its case.
|
||||
#
|
||||
t_skip_permitted()
|
||||
{
|
||||
t_status_msg "$@"
|
||||
exit $T_SKIP_PERMITTED_STATUS
|
||||
}
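As a usage sketch, with a made-up feature probe, a test that can never pass on a given configuration would take this exit path early and document why:

# permitted skip: this test fundamentally requires O_TMPFILE support
if ! kernel_supports_o_tmpfile; then
	t_skip_permitted "kernel lacks O_TMPFILE support"
fi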
|
||||
|
||||
t_fail()
|
||||
{
|
||||
t_status_msg "$@"
|
||||
@@ -35,10 +47,22 @@ t_fail()
|
||||
t_quiet()
|
||||
{
|
||||
echo "# $*" >> "$T_TMPDIR/quiet.log"
|
||||
"$@" > "$T_TMPDIR/quiet.log" 2>&1 || \
|
||||
"$@" >> "$T_TMPDIR/quiet.log" 2>&1 || \
|
||||
t_fail "quiet command failed"
|
||||
}
|
||||
|
||||
#
|
||||
# Quietly run a command during a test. The output is logged but only
|
||||
# the return code is printed, presumably because the output contains
|
||||
# a lot of invocation specific text that is difficult to filter.
|
||||
#
|
||||
t_rc()
|
||||
{
|
||||
echo "# $*" >> "$T_TMP.rc.log"
|
||||
"$@" >> "$T_TMP.rc.log" 2>&1
|
||||
echo "rc: $?"
|
||||
}
|
||||
|
||||
#
|
||||
# redirect test output back to the output of the invoking script instead
|
||||
# of the compared output.
|
||||
@@ -56,3 +80,15 @@ t_compare_output()
|
||||
{
|
||||
"$@" >&7 2>&1
|
||||
}
|
||||
|
||||
#
|
||||
# usually bash prints an annoying output message when jobs
|
||||
# are killed. We can avoid that by redirecting stderr for
|
||||
# the bash process when it reaps the jobs that are killed.
|
||||
#
|
||||
t_silent_kill() {
|
||||
exec {ERR}>&2 2>/dev/null
|
||||
kill "$@"
|
||||
wait "$@"
|
||||
exec 2>&$ERR {ERR}>&-
|
||||
}
|
||||
|
||||
@@ -6,6 +6,61 @@ t_filter_fs()
|
||||
-e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
|
||||
}
|
||||
|
||||
#
|
||||
# We can hit a spurious kasan warning that was fixed upstream:
|
||||
#
|
||||
# e504e74cc3a2 x86/unwind/orc: Disable KASAN checking in the ORC unwinder, part 2
|
||||
#
|
||||
# KASAN can get mad when the unwinder doesn't find ORC metadata and
|
||||
# wanders up without using frames and hits the KASAN stack red zones.
|
||||
# We can ignore these messages.
|
||||
#
|
||||
# They're bracketed by:
|
||||
# [ 2687.690127] ==================================================================
|
||||
# [ 2687.691366] BUG: KASAN: stack-out-of-bounds in get_reg+0x1bc/0x230
|
||||
# ...
|
||||
# [ 2687.706220] ==================================================================
|
||||
# [ 2687.707284] Disabling lock debugging due to kernel taint
|
||||
#
|
||||
# That final lock debugging message may not be included.
|
||||
#
|
||||
ignore_harmless_unwind_kasan_stack_oob()
|
||||
{
|
||||
awk '
|
||||
BEGIN {
|
||||
in_soob = 0
|
||||
soob_nr = 0
|
||||
}
|
||||
( !in_soob && $0 ~ /==================================================================/ ) {
|
||||
in_soob = 1
|
||||
soob_nr = NR
|
||||
saved = $0
|
||||
}
|
||||
( in_soob == 1 && NR == (soob_nr + 1) ) {
|
||||
if (match($0, /KASAN: stack-out-of-bounds in get_reg/) != 0) {
|
||||
in_soob = 2
|
||||
} else {
|
||||
in_soob = 0
|
||||
print saved
|
||||
}
|
||||
saved=""
|
||||
}
|
||||
( in_soob == 2 && $0 ~ /==================================================================/ ) {
|
||||
in_soob = 3
|
||||
soob_nr = NR
|
||||
}
|
||||
( in_soob == 3 && NR > soob_nr && $0 !~ /Disabling lock debugging/ ) {
|
||||
in_soob = 0
|
||||
}
|
||||
( !in_soob ) { print $0 }
|
||||
END {
|
||||
if (saved) {
|
||||
print saved
|
||||
}
|
||||
}
|
||||
'
|
||||
}
|
||||
|
||||
#
|
||||
# Filter out expected messages. Putting messages here implies that
|
||||
# tests aren't relying on messages to discover failures.. they're
|
||||
@@ -18,6 +73,7 @@ t_filter_dmesg()
|
||||
|
||||
# the kernel can just be noisy
|
||||
re=" used greatest stack depth: "
|
||||
re="$re|sched: RT throttling activated"
|
||||
|
||||
# mkfs/mount checks partition tables
|
||||
re="$re|unknown partition table"
|
||||
@@ -56,8 +112,12 @@ t_filter_dmesg()
|
||||
re="$re|scoutfs .*: all clients recovered"
|
||||
re="$re|scoutfs .* error: client rid.*lock recovery timed out"
|
||||
|
||||
# some tests mount w/o options
|
||||
# we test bad devices and options
|
||||
re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
|
||||
re="$re|scoutfs .* error: meta_super META flag not set"
|
||||
re="$re|scoutfs .* error: could not open metadev:.*"
|
||||
re="$re|scoutfs .* error: Unknown or malformed option,.*"
|
||||
re="$re|scoutfs .* error: invalid quorum_heartbeat_timeout_ms value"
|
||||
|
||||
# in debugging kernels we can slow things down a bit
|
||||
re="$re|hrtimer: interrupt took .*"
|
||||
@@ -72,6 +132,40 @@ t_filter_dmesg()
|
||||
re="$re|scoutfs .* error reading quorum block"
|
||||
re="$re|scoutfs .* error .* writing quorum block"
|
||||
re="$re|scoutfs .* error .* while checking to delete inode"
|
||||
re="$re|scoutfs .* error .*writing btree blocks.*"
|
||||
re="$re|scoutfs .* error .*writing super block.*"
|
||||
re="$re|scoutfs .* error .* freeing merged btree blocks.*.looping commit del.*upd freeing item"
|
||||
re="$re|scoutfs .* error .* freeing merged btree blocks.*.final commit del.upd freeing item"
|
||||
re="$re|scoutfs .* error .*reading quorum block.*to update event.*"
|
||||
re="$re|scoutfs .* error.*server failed to bind to.*"
|
||||
re="$re|scoutfs .* critical transaction commit failure.*"
|
||||
|
||||
egrep -v "($re)"
|
||||
# ENOLINK (-67) indicates an expected forced unmount error
|
||||
re="$re|scoutfs .* error -67 .*"
|
||||
|
||||
# change-devices causes loop device resizing
|
||||
re="$re|loop: module loaded"
|
||||
re="$re|loop[0-9].* detected capacity change from.*"
|
||||
re="$re|dm-[0-9].* detected capacity change from.*"
|
||||
|
||||
# ignore systemd-journal rotating
|
||||
re="$re|systemd-journald.*"
|
||||
|
||||
# process accounting can be noisy
|
||||
re="$re|Process accounting resumed.*"
|
||||
|
||||
# format vers back/compat tries bad mounts
|
||||
re="$re|scoutfs .* error.*outside of supported version.*"
|
||||
re="$re|scoutfs .* error.*could not get .*super.*"
|
||||
|
||||
# ignore "unsafe core pattern" when xfstests tries to disable cores"
|
||||
re="$re|Unsafe core_pattern used with fs.suid_dumpable=2.*"
|
||||
re="$re|Pipe handler or fully qualified core dump path required.*"
|
||||
re="$re|Set kernel.core_pattern before fs.suid_dumpable.*"
|
||||
|
||||
# perf warning that it adjusted sample rate
|
||||
re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"
|
||||
|
||||
egrep -v "($re)" | \
|
||||
ignore_harmless_unwind_kasan_stack_oob
|
||||
}
|
||||
|
||||
@@ -29,13 +29,12 @@ t_mount_rid()
|
||||
}
|
||||
|
||||
#
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given mount
|
||||
# number, 0 is used by default if none is specified.
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given path
|
||||
# in a mounted scoutfs volume.
|
||||
#
|
||||
t_ident()
|
||||
t_ident_from_mnt()
|
||||
{
|
||||
local nr="${1:-0}"
|
||||
local mnt="$(eval echo \$T_M$nr)"
|
||||
local mnt="$1"
|
||||
local fsid
|
||||
local rid
|
||||
|
||||
@@ -45,6 +44,38 @@ t_ident()
|
||||
echo "f.${fsid:0:6}.r.${rid:0:6}"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given mount
|
||||
# number, 0 is used by default if none is specified.
|
||||
#
|
||||
t_ident()
|
||||
{
|
||||
local nr="${1:-0}"
|
||||
local mnt="$(eval echo \$T_M$nr)"
|
||||
|
||||
t_ident_from_mnt "$mnt"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the sysfs path for a path in a mounted fs.
|
||||
#
|
||||
t_sysfs_path_from_ident()
|
||||
{
|
||||
local ident="$1"
|
||||
|
||||
echo "/sys/fs/scoutfs/$ident"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the sysfs path for a path in a mounted fs.
|
||||
#
|
||||
t_sysfs_path_from_mnt()
|
||||
{
|
||||
local mnt="$1"
|
||||
|
||||
t_sysfs_path_from_ident $(t_ident_from_mnt $mnt)
|
||||
}
|
||||
|
||||
#
|
||||
# Output the mount's sysfs path, defaulting to mount 0 if none is
|
||||
# specified.
|
||||
@@ -53,7 +84,7 @@ t_sysfs_path()
|
||||
{
|
||||
local nr="$1"
|
||||
|
||||
echo "/sys/fs/scoutfs/$(t_ident $nr)"
|
||||
t_sysfs_path_from_ident $(t_ident $nr)
|
||||
}
|
||||
|
||||
#
|
||||
@@ -75,6 +106,29 @@ t_fs_nrs()
|
||||
seq 0 $((T_NR_MOUNTS - 1))
|
||||
}
|
||||
|
||||
#
|
||||
# output the fs nrs of quorum nodes, we "know" that
# the quorum nrs are the first consecutive nrs
|
||||
#
|
||||
t_quorum_nrs()
|
||||
{
|
||||
seq 0 $((T_QUORUM - 1))
|
||||
}
|
||||
|
||||
#
|
||||
# outputs "1" if the fs number has "1" in its quorum/is_leader file.
|
||||
# All other cases output 0, including the fs nr being a client which
|
||||
# won't have a quorum/ dir.
|
||||
#
|
||||
t_fs_is_leader()
|
||||
{
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader 2>/dev/null)" == "1" ]; then
|
||||
echo "1"
|
||||
else
|
||||
echo "0"
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Output the mount nr of the current server. This takes no steps to
|
||||
# ensure that the server doesn't shut down and have some other mount
|
||||
@@ -83,7 +137,7 @@ t_fs_nrs()
|
||||
t_server_nr()
|
||||
{
|
||||
for i in $(t_fs_nrs); do
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "1" ]; then
|
||||
if [ "$(t_fs_is_leader $i)" == "1" ]; then
|
||||
echo $i
|
||||
return
|
||||
fi
|
||||
@@ -101,7 +155,7 @@ t_server_nr()
|
||||
t_first_client_nr()
|
||||
{
|
||||
for i in $(t_fs_nrs); do
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "0" ]; then
|
||||
if [ "$(t_fs_is_leader $i)" == "0" ]; then
|
||||
echo $i
|
||||
return
|
||||
fi
|
||||
@@ -130,7 +184,27 @@ t_mount()
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval t_quiet mount -t scoutfs \$T_O$nr \$T_DB$nr \$T_M$nr
|
||||
eval t_quiet mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
|
||||
}
|
||||
|
||||
#
|
||||
# Mount with an optional mount option string. If the string is empty
|
||||
# then the saved mount options are used. If the string has contents
|
||||
# then it is appended to the end of the saved options with a separating
|
||||
# comma.
|
||||
#
|
||||
# Unlike t_mount this won't inherently fail in t_quiet, errors are
|
||||
# returned so bad options can be tested.
|
||||
#
|
||||
t_mount_opt()
|
||||
{
|
||||
local nr="$1"
|
||||
local opt="${2:+,$2}"
|
||||
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
|
||||
}
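As a usage sketch (the option string is illustrative), a test that wants to confirm a bad option is rejected can check the return code directly and then remount normally:

t_umount 0
t_mount_opt 0 "quorum_heartbeat_timeout_ms=bogus" && \
	t_fail "mount with bad option unexpectedly succeeded"
t_mount 0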
|
||||
|
||||
t_umount()
|
||||
@@ -222,6 +296,15 @@ t_trigger_get() {
|
||||
cat "$(t_trigger_path "$nr")/$which"
|
||||
}
|
||||
|
||||
t_trigger_set() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
local val="$3"
|
||||
local path=$(t_trigger_path "$nr")
|
||||
|
||||
echo "$val" > "$path/$which"
|
||||
}
|
||||
|
||||
t_trigger_show() {
|
||||
local which="$1"
|
||||
local string="$2"
|
||||
@@ -233,9 +316,8 @@ t_trigger_show() {
|
||||
t_trigger_arm_silent() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
local path=$(t_trigger_path "$nr")
|
||||
|
||||
echo 1 > "$path/$which"
|
||||
t_trigger_set "$which" "$nr" 1
|
||||
}
|
||||
|
||||
t_trigger_arm() {
|
||||
@@ -362,3 +444,57 @@ t_wait_for_leader() {
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
t_get_sysfs_mount_option() {
|
||||
local nr="$1"
|
||||
local name="$2"
|
||||
local opt="$(t_sysfs_path $nr)/mount_options/$name"
|
||||
|
||||
cat "$opt"
|
||||
}
|
||||
|
||||
t_set_sysfs_mount_option() {
|
||||
local nr="$1"
|
||||
local name="$2"
|
||||
local val="$3"
|
||||
local opt="$(t_sysfs_path $nr)/mount_options/$name"
|
||||
|
||||
echo "$val" > "$opt" 2>/dev/null
|
||||
}
|
||||
|
||||
t_set_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local val="$2"
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
t_set_sysfs_mount_option $i $name $val
|
||||
done
|
||||
}
|
||||
|
||||
declare -A _saved_opts
|
||||
t_save_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local ind
|
||||
local opt
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
opt="$(t_sysfs_path $i)/mount_options/$name"
|
||||
ind="${name}_${i}"
|
||||
|
||||
_saved_opts[$ind]="$(cat $opt)"
|
||||
done
|
||||
}
|
||||
|
||||
t_restore_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local ind
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
ind="${name}_${i}"
|
||||
|
||||
t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
|
||||
done
|
||||
}
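Taken together, a test would typically bracket its run with these helpers, saving the current values, pushing a test value on every mount, and restoring the originals afterwards (the option name here is illustrative):

t_save_all_sysfs_mount_options data_prealloc_blocks
t_set_all_sysfs_mount_options data_prealloc_blocks 8

# ... run the workload that depends on the smaller preallocation ...

t_restore_all_sysfs_mount_options data_prealloc_blocks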
|
||||
|
||||
88
tests/funcs/tap.sh
Normal file
@@ -0,0 +1,88 @@
|
||||
|
||||
#
|
||||
# Generate TAP format test results
|
||||
#
|
||||
|
||||
t_tap_header()
|
||||
{
|
||||
local runid=$1
|
||||
local sequence=( $(echo $tests) )
|
||||
local count=${#sequence[@]}
|
||||
|
||||
# avoid recreating the same TAP result over again - harness sets this
|
||||
[[ -z "$runid" ]] && runid="*test*"
|
||||
|
||||
cat > $T_RESULTS/scoutfs.tap <<TAPEOF
|
||||
TAP version 14
|
||||
1..${count}
|
||||
#
|
||||
# TAP results for run ${runid}
|
||||
#
|
||||
# host/run info:
|
||||
#
|
||||
# hostname: ${HOSTNAME}
|
||||
# test start time: $(date --utc)
|
||||
# uname -r: $(uname -r)
|
||||
# scoutfs commit id: $(git describe --tags)
|
||||
#
|
||||
# sequence for this run:
|
||||
#
|
||||
TAPEOF
|
||||
|
||||
# Sequence
|
||||
for t in ${tests}; do
|
||||
echo ${t/.sh/}
|
||||
done | cat -n | expand | column -c 120 | expand | sed 's/^ /#/' >> $T_RESULTS/scoutfs.tap
|
||||
echo "#" >> $T_RESULTS/scoutfs.tap
|
||||
}
|
||||
|
||||
t_tap_progress()
|
||||
{
|
||||
(
|
||||
local i=$(( testcount + 1 ))
|
||||
local testname=$1
|
||||
local result=$2
|
||||
|
||||
local diff=""
|
||||
local dmsg=""
|
||||
|
||||
if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
|
||||
dmsg="1"
|
||||
fi
|
||||
|
||||
if ! cmp -s golden/${testname} $T_RESULTS/output/${testname}; then
|
||||
diff="1"
|
||||
fi
|
||||
|
||||
if [[ "${result}" == "100" ]] && [[ -z "${dmsg}" ]] && [[ -z "${diff}" ]]; then
|
||||
echo "ok ${i} - ${testname}"
|
||||
elif [[ "${result}" == "103" ]]; then
|
||||
echo "ok ${i} - ${testname}"
|
||||
echo "# ${testname} ** skipped - permitted **"
|
||||
else
|
||||
echo "not ok ${i} - ${testname}"
|
||||
case ${result} in
|
||||
101)
|
||||
echo "# ${testname} ** skipped **"
|
||||
;;
|
||||
102)
|
||||
echo "# ${testname} ** failed **"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "#"
|
||||
echo "# diff:"
|
||||
echo "#"
|
||||
diff -u golden/${testname} $T_RESULTS/output/${testname} | expand | sed 's/^/# /'
|
||||
fi
|
||||
|
||||
if [[ -n "${dmsg}" ]]; then
|
||||
echo "#"
|
||||
echo "# dmesg:"
|
||||
echo "#"
|
||||
cat "$T_RESULTS/tmp/${testname}/dmesg.new" | sed 's/^/# /'
|
||||
fi
|
||||
fi
|
||||
) >> $T_RESULTS/scoutfs.tap
|
||||
}
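The resulting $T_RESULTS/scoutfs.tap stream then reads roughly like this (the run id, test names, and counts are illustrative):

TAP version 14
1..3
# TAP results for run 20240101-1
ok 1 - basic-truncate
not ok 2 - data-prealloc
# data-prealloc ** failed **
ok 3 - fallocate
# fallocate ** skipped - permitted **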
|
||||
6
tests/golden/basic-bad-mounts
Normal file
@@ -0,0 +1,6 @@
|
||||
== prepare devices, mount point, and logs
|
||||
== bad devices, bad options
|
||||
== swapped devices
|
||||
== both meta devices
|
||||
== both data devices
|
||||
== good volume, bad option and good options
|
||||
155
tests/golden/basic-posix-acl
Normal file
@@ -0,0 +1,155 @@
|
||||
== setup test directory
|
||||
== getfacl
|
||||
directory drwxr-xr-x 0 0 0 '.'
|
||||
# file: .
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
group::r-x
|
||||
other::r-x
|
||||
|
||||
== basic non-acl access through permissions
|
||||
directory drwxr-xr-x 0 44444 0 'dir-testuid'
|
||||
touch: cannot touch 'dir-testuid/file-group-write': Permission denied
|
||||
touch: cannot touch 'symlinkdir-testuid/symlink-file-group-write': Permission denied
|
||||
regular empty file -rw-r--r-- 22222 44444 0 'dir-testuid/file-group-write'
|
||||
regular empty file -rw-r--r-- 22222 44444 0 'symlinkdir-testuid/symlink-file-group-write'
|
||||
== basic acl access
|
||||
directory drwxr-xr-x 0 0 0 'dir-root'
|
||||
touch: cannot touch 'dir-root/file-group-write': Permission denied
|
||||
touch: cannot touch 'symlinkdir-root/file-group-write': Permission denied
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
regular empty file -rw-r--r-- 22222 0 0 'dir-root/file-group-write'
|
||||
regular empty file -rw-r--r-- 22222 0 0 'symlinkdir-root/file-group-write'
|
||||
== directory exec
|
||||
Success
|
||||
Success
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rw-
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
Failed
|
||||
Failed
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rw-
|
||||
group::r-x
|
||||
group:44444:rwx
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
Success
|
||||
Success
|
||||
== get/set attr
|
||||
regular empty file -rw-r--r-- 0 0 0 'file-root'
|
||||
setfattr: file-root: Permission denied
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
user:22222:rw-
|
||||
group::r--
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
group::r--
|
||||
mask::r--
|
||||
other::r--
|
||||
|
||||
setfattr: file-root: Permission denied
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
group::r--
|
||||
group:44444:rw-
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
user.test4="Success"
|
||||
|
||||
== inheritance / default acl
|
||||
directory drwxr-xr-x 0 0 0 'dir-root2'
|
||||
mkdir: cannot create directory 'dir-root2/dir': Permission denied
|
||||
touch: cannot touch 'dir-root2/dir/file': No such file or directory
|
||||
# file: dir-root2
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
group::r-x
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
mkdir: cannot create directory 'dir-root2/dir': Permission denied
|
||||
touch: cannot touch 'dir-root2/dir/file': No such file or directory
|
||||
# file: dir-root2
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
directory drwxrwxr-x 22222 0 4 'dir-root2/dir'
|
||||
# file: dir-root2/dir
|
||||
# owner: 22222
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
regular empty file -rw-rw-r-- 22222 0 0 'dir-root2/dir/file'
|
||||
# file: dir-root2/dir/file
|
||||
# owner: 22222
|
||||
# group: root
|
||||
user::rw-
|
||||
user:22222:rwx #effective:rw-
|
||||
group::r-x #effective:r--
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
== cleanup
|
||||
@@ -47,11 +47,13 @@ four
|
||||
--- dir within dir
|
||||
--- overwrite file
|
||||
--- can't overwrite non-empty dir
|
||||
mv: cannot move ‘/mnt/test/test/basic-posix-consistency/dir/c/clobber’ to ‘/mnt/test/test/basic-posix-consistency/dir/a/dir’: Directory not empty
|
||||
mv: cannot move '/mnt/test/test/basic-posix-consistency/dir/c/clobber' to '/mnt/test/test/basic-posix-consistency/dir/a/dir': Directory not empty
|
||||
--- can overwrite empty dir
|
||||
--- can rename into root
|
||||
== path resoluion
|
||||
== inode indexes match after syncing existing
|
||||
== inode indexes match after copying and syncing
|
||||
== inode indexes match after removing and syncing
|
||||
== concurrent creates make one file
|
||||
one-file
|
||||
== cleanup
|
||||
|
||||
6
tests/golden/basic-truncate
Normal file
@@ -0,0 +1,6 @@
|
||||
== truncate writes zeroed partial end of file block
|
||||
0000000 0a79 0a79 0a79 0a79 0a79 0a79 0a79 0a79
|
||||
*
|
||||
0006144 0000 0000 0000 0000 0000 0000 0000 0000
|
||||
*
|
||||
0012288
|
||||
@@ -1,52 +1,2 @@
|
||||
== create shared test file
|
||||
== set and get xattrs between mount pairs while retrying
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="1"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="2"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="3"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="4"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="5"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="6"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="7"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="8"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="9"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="10"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
== Issue scoutfs df to force block reads to trigger stale invalidation/retry
|
||||
counter block_cache_remove_stale changed
|
||||
|
||||
28
tests/golden/change-devices
Normal file
@@ -0,0 +1,28 @@
|
||||
== make tmp sparse data dev files
|
||||
== make scratch fs
|
||||
== small new data device fails
|
||||
rc: 1
|
||||
== check sees data device errors
|
||||
rc: 1
|
||||
rc: 0
|
||||
== preparing while mounted fails
|
||||
rc: 1
|
||||
== preparing without recovery fails
|
||||
rc: 1
|
||||
== check sees metadata errors
|
||||
rc: 1
|
||||
rc: 1
|
||||
== preparing with file data fails
|
||||
rc: 1
|
||||
== preparing after emptied
|
||||
rc: 0
|
||||
== checks pass
|
||||
rc: 0
|
||||
rc: 0
|
||||
== using prepared
|
||||
== preparing larger and resizing
|
||||
rc: 0
|
||||
equal_prepared
|
||||
large_prepared
|
||||
resized larger test rc: 0
|
||||
== cleanup
|
||||
1
tests/golden/client-unmount-recovery
Normal file
@@ -0,0 +1 @@
|
||||
== 60s of unmounting non-quorum clients during recovery
|
||||
@@ -1,3 +1,4 @@
|
||||
== measure initial createmany
|
||||
== measure initial createmany
|
||||
== measure two concurrent createmany runs
|
||||
== cleanup
|
||||
|
||||
330
tests/golden/data-prealloc
Normal file
@@ -0,0 +1,330 @@
|
||||
== initial writes smaller than prealloc grow to prealloc size
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 7
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 7
|
||||
== larger files get full prealloc extents
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 9
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 9
|
||||
== non-streaming writes with contig have per-block extents
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 32
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 32
|
||||
== any writes to region prealloc get full extents
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 4
|
||||
== streaming offline writes get full extents either way
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 4
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 4
|
||||
== goofy preallocation amounts work
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 6
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 6
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 6
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 6
|
||||
/mnt/test/test/data-prealloc/file-1: extents: 3
|
||||
/mnt/test/test/data-prealloc/file-2: extents: 3
|
||||
== block writes into region allocs hole
|
||||
wrote blk 24
|
||||
wrote blk 32
|
||||
wrote blk 40
|
||||
wrote blk 55
|
||||
wrote blk 63
|
||||
wrote blk 71
|
||||
wrote blk 72
|
||||
wrote blk 79
|
||||
wrote blk 80
|
||||
wrote blk 87
|
||||
wrote blk 88
|
||||
wrote blk 95
|
||||
before:
|
||||
24.. 1:
|
||||
32.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 0 at pos 0
|
||||
wrote blk 0
|
||||
0.. 1:
|
||||
1.. 7: unwritten
|
||||
24.. 1:
|
||||
32.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 0 at pos 1
|
||||
wrote blk 15
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
24.. 1:
|
||||
32.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 0 at pos 2
|
||||
wrote blk 19
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
32.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 1 at pos 0
|
||||
wrote blk 25
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 1 at pos 1
|
||||
wrote blk 39
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 1 at pos 2
|
||||
wrote blk 44
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 2 at pos 0
|
||||
wrote blk 48
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 2 at pos 1
|
||||
wrote blk 62
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
56.. 6: unwritten
|
||||
62.. 1:
|
||||
63.. 1:
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 2 at pos 2
|
||||
wrote blk 67
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
56.. 6: unwritten
|
||||
62.. 1:
|
||||
63.. 1:
|
||||
64.. 3: unwritten
|
||||
67.. 1:
|
||||
68.. 3: unwritten
|
||||
71.. 2:
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 3 at pos 0
|
||||
wrote blk 73
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
56.. 6: unwritten
|
||||
62.. 1:
|
||||
63.. 1:
|
||||
64.. 3: unwritten
|
||||
67.. 1:
|
||||
68.. 3: unwritten
|
||||
71.. 2:
|
||||
73.. 1:
|
||||
74.. 5: unwritten
|
||||
79.. 2:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 3 at pos 1
|
||||
wrote blk 86
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
56.. 6: unwritten
|
||||
62.. 1:
|
||||
63.. 1:
|
||||
64.. 3: unwritten
|
||||
67.. 1:
|
||||
68.. 3: unwritten
|
||||
71.. 2:
|
||||
73.. 1:
|
||||
74.. 5: unwritten
|
||||
79.. 2:
|
||||
86.. 1:
|
||||
87.. 2:
|
||||
95.. 1: eof
|
||||
writing into existing 3 at pos 2
|
||||
wrote blk 92
|
||||
0.. 1:
|
||||
1.. 14: unwritten
|
||||
15.. 1:
|
||||
16.. 3: unwritten
|
||||
19.. 1:
|
||||
20.. 4: unwritten
|
||||
24.. 1:
|
||||
25.. 1:
|
||||
26.. 6: unwritten
|
||||
32.. 1:
|
||||
39.. 1:
|
||||
40.. 1:
|
||||
44.. 1:
|
||||
45.. 3: unwritten
|
||||
48.. 1:
|
||||
49.. 6: unwritten
|
||||
55.. 1:
|
||||
56.. 6: unwritten
|
||||
62.. 1:
|
||||
63.. 1:
|
||||
64.. 3: unwritten
|
||||
67.. 1:
|
||||
68.. 3: unwritten
|
||||
71.. 2:
|
||||
73.. 1:
|
||||
74.. 5: unwritten
|
||||
79.. 2:
|
||||
86.. 1:
|
||||
87.. 2:
|
||||
92.. 1:
|
||||
93.. 2: unwritten
|
||||
95.. 1: eof
|
||||
3
tests/golden/fallocate
Normal file
@@ -0,0 +1,3 @@
|
||||
== creating reasonably large per-mount files
|
||||
== 10s of racing cold reads and fallocate nop
|
||||
== cleaning up files
|
||||
4
tests/golden/format-version-forward-back
Normal file
@@ -0,0 +1,4 @@
|
||||
== ensuring utils and module for old versions
|
||||
== unmounting test fs and removing test module
|
||||
== testing combinations of old and new format versions
|
||||
== restoring test module and mount
|
||||
18
tests/golden/get-referring-entries
Normal file
@@ -0,0 +1,18 @@
|
||||
== root inode returns nothing
|
||||
== crazy large unused inode does nothing
|
||||
== basic entry
|
||||
file
|
||||
== rename
|
||||
renamed
|
||||
== hard link
|
||||
file
|
||||
link
|
||||
== removal
|
||||
== different dirs
|
||||
== file types
|
||||
type b name block
|
||||
type c name char
|
||||
type d name dir
|
||||
type f name file
|
||||
type l name symlink
|
||||
== all name lengths work
|
||||
@@ -17,7 +17,7 @@ ino not found in dseq index
|
||||
mount 0 contents after mount 1 rm: contents
|
||||
ino found in dseq index
|
||||
ino found in dseq index
|
||||
stat: cannot stat ‘/mnt/test/test/inode-deletion/file’: No such file or directory
|
||||
stat: cannot stat '/mnt/test/test/inode-deletion/file': No such file or directory
|
||||
ino not found in dseq index
|
||||
ino not found in dseq index
|
||||
== lots of deletions use one open map
|
||||
|
||||
3
tests/golden/large-fragmented-free
Normal file
@@ -0,0 +1,3 @@
|
||||
== creating fragmented extents
|
||||
== unlink file with moved extents to free extents per block
|
||||
== cleanup
|
||||
3
tests/golden/lock-recover-invalidate
Normal file
@@ -0,0 +1,3 @@
|
||||
== starting background invalidating read/write load
|
||||
== 60s of lock recovery during invalidating load
|
||||
== stopping background load
|
||||
0
tests/golden/lock-rever-invalidate
Normal file
2
tests/golden/lock-shrink-read-race
Normal file
@@ -0,0 +1,2 @@
|
||||
=== setup
|
||||
=== spin reading and shrinking
|
||||
Some files were not shown because too many files have changed in this diff