Mirror of https://github.com/versity/scoutfs.git (synced 2026-01-18 01:12:51 +00:00)

Compare commits: zab/coding...greg/ci-up (731 commits)
README.md (133 lines changed)
@@ -1,135 +1,24 @@
# Introduction

scoutfs is a clustered in-kernel Linux filesystem designed and built
from the ground up to support large archival systems.
scoutfs is a clustered in-kernel Linux filesystem designed to support
large archival systems. It features additional interfaces and metadata
so that archive agents can perform their maintenance workflows without
walking all the files in the namespace. Its cluster support lets
deployments add nodes to satisfy archival tier bandwidth targets.

Its key differentiating features are:
The design goal is to reach file populations in the trillions, with the
archival bandwidth to match, while remaining operational and responsive.

- Integrated consistent indexing accelerates archival maintenance operations
- Commit logs allow nodes to write concurrently without contention

It meets best of breed expectations:
Highlights of the design and implementation include:

* Fully consistent POSIX semantics between nodes
* Rich metadata to ensure the integrity of metadata references
* Atomic transactions to maintain consistent persistent structures
* First class kernel implementation for high performance and low latency
* Integrated archival metadata replaces syncing to external databases
* Dynamic separation of resources lets nodes write in parallel
* 64bit throughout; no limits on file or directory sizes or counts
* Open GPLv2 implementation

Learn more in the [white paper](https://docs.wixstatic.com/ugd/aaa89b_88a5cc84be0b4d1a90f60d8900834d28.pdf).

# Current Status

**Alpha Open Source Development**

scoutfs is under heavy active development. We're developing it in the
open to give the community an opportunity to affect the design and
implementation.

The core architectural design elements are in place. Much surrounding
functionality hasn't been implemented. It's appropriate for early
adopters and interested developers, not for production use.

In that vein, expect significant incompatible changes to both the format
of network messages and persistent structures. Since the format hash-checking
has now been removed in preparation for release, if there is any doubt, mkfs
is strongly recommended.

The current kernel module is developed against the RHEL/CentOS 7.x
kernel to minimize the friction of developing and testing with partners'
existing infrastructure. Once we're happy with the design we'll shift
development to the upstream kernel while maintaining distro
compatibility branches.

# Community Mailing List

Please join us on the open scoutfs-devel@scoutfs.org [mailing list
hosted on Google Groups](https://groups.google.com/a/scoutfs.org/forum/#!forum/scoutfs-devel)
for all discussion of scoutfs.

# Quick Start

**The following is a very rough example of the procedure for getting up
and running; experience will be needed to fill in the gaps. We're happy
to help on the mailing list.**

The requirements for running scoutfs on a small cluster are:

1. One or more nodes running x86-64 CentOS/RHEL 7.4 (or 7.3)
2. Access to two shared block devices
3. IPv4 connectivity between the nodes

The steps for getting scoutfs mounted and operational are:

1. Get the kernel module running on the nodes
2. Make a new filesystem on the devices with the userspace utilities
3. Mount the devices on all the nodes

In this example we use three nodes. The names of the block devices are
the same on all the nodes. Two of the nodes will be quorum members. A
majority of quorum members must be mounted to elect a leader to run a
server that all the mounts connect to. It should be noted that two
quorum members results in a majority of one (each member by itself), so
split brain elections are possible but so unlikely that it's fine for a
demonstration.
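The commands below refer to a few shell variables. As a minimal sketch
(the addresses and slot numbers here are placeholders, not values from
this document), they could be set on each node like this:

```shell
# Placeholders only: substitute the real IPv4 addresses of your nodes.
NODE0_ADDR=192.168.1.10    # address of the quorum member in slot 0
NODE1_ADDR=192.168.1.11    # address of the quorum member in slot 1
SLOT_NR=0                  # set to 0 on the first quorum node, 1 on the second
```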
1. Get the Kernel Module and Userspace Binaries

* Either use snapshot RPMs built from git by Versity:

```shell
rpm -i https://scoutfs.s3-us-west-2.amazonaws.com/scoutfs-repo-0.0.1-1.el7_4.noarch.rpm
yum install scoutfs-utils kmod-scoutfs
```

* Or use the binaries built from checked out git repositories:

```shell
yum install kernel-devel
git clone git@github.com:versity/scoutfs.git
make -C scoutfs
modprobe libcrc32c
insmod scoutfs/kmod/src/scoutfs.ko
alias scoutfs=$PWD/scoutfs/utils/src/scoutfs
```

2. Make a New Filesystem (**destroys contents**)

We specify quorum slots with the addresses of each of the quorum
member nodes, the metadata device, and the data device.

```shell
scoutfs mkfs -Q 0,$NODE0_ADDR,12345 -Q 1,$NODE1_ADDR,12345 /dev/meta_dev /dev/data_dev
```

3. Mount the Filesystem

First, mount each of the quorum nodes so that they can elect and
start a server for the remaining node to connect to. The slot numbers
were specified with the leading "0,..." and "1,..." in the mkfs options
above.

```shell
mount -t scoutfs -o quorum_slot_nr=$SLOT_NR,metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

Then mount the remaining node which can now connect to the running server.

```shell
mount -t scoutfs -o metadev_path=/dev/meta_dev /dev/data_dev /mnt/scoutfs
```

4. For Kicks, Observe the Metadata Change Index

The `meta_seq` index tracks the inodes that are changed in each
transaction.

```shell
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/two; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
touch /mnt/scoutfs/one; sync
scoutfs walk-inodes meta_seq 0 -1 /mnt/scoutfs
```
ReleaseNotes.md (new file, 404 lines)
@@ -0,0 +1,404 @@
Versity ScoutFS Release Notes
=============================

---
v1.24
\
*Mar 14, 2025*

Add support for coherent read and write mmap() mappings of regular file
data between mounts.

Fix a bug that was causing scoutfs utilities to parse and change some
file names before passing them on to the kernel for processing. This
fixes spurious scoutfs command errors for files with the offending
patterns in their names.

Fix a bug where rename wasn't updating the ctime of the inode at the
destination name if it existed.

---
v1.23
\
*Dec 11, 2024*

Add support for kernels in the RHEL 9.5 minor release.

---
v1.22
\
*Nov 1, 2024*

Add support for building against the RHEL9 family of kernels.

Fix failure of the setattr\_more ioctl() to set the attributes of a
zero-length file when restoring.

Fix support for POSIX ACLs in the RHEL8 and later family of kernels.

Fix a race condition in the lock server that could drop lock requests
under heavy load and cause cluster lock attempts to hang.

---
v1.21
\
*Jul 1, 2024*

This release adds features that rely on incompatible changes to the
file system structure. The process of advancing the format version
to enable these features is described in scoutfs(5).

Added the ".indx." extended attribute tag which can be used to determine
the sorting of files in a global index.

Added ScoutFS quotas which let rules define file size and count limits
in terms of ".totl." extended attribute totals.

Added the project ID file attribute which is inherited from parent
directories on creation. ScoutFS quota rules can reference project IDs.

Add a retention attribute for files which prevents modification once
enabled.

---
v1.20
\
*Apr 22, 2024*

Minor changes to packaging to better support "weak" module linking of
the kernel module, and to include git hashes in the built package. No
changes in runtime behaviour.

---
v1.19
\
*Jan 30, 2024*

Added the log\_merge\_wait\_timeout\_ms mount option to set the timeout
for creating log merge operations. The previous timeout, now the
default, was too short for some systems and was resulting in consistent
timeouts which created an excessive number of log trees waiting to be
merged.

Improved performance of many in-mount server operations when there are a
large number of log trees waiting to be merged.
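For illustration, a hedged sketch of passing the option at mount time,
reusing the mount form from the README; the timeout value here is an
arbitrary example, not a recommendation:

```shell
# Example value only; choose a timeout appropriate for your system.
mount -t scoutfs -o metadev_path=/dev/meta_dev,log_merge_wait_timeout_ms=30000 \
    /dev/data_dev /mnt/scoutfs
```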

---
v1.18
\
*Nov 7, 2023*

Fixed a bug where background srch file compaction could stop making
forward progress if a partial compaction operation was committed at a
specific byte offset in a block. This would cause srch file searches to
be progressively more expensive over time. Once this fix is running
background compaction will resume, bringing the cost of searches back
down.

---
v1.17
\
*Oct 23, 2023*

Add support for EL8 generation kernels.

---
v1.16
\
*Oct 4, 2023*

Fix an issue where the server could hang on startup if its persistent
allocator structures were left in a specific degraded state by the
previously active server.

---
v1.15
\
*Jul 17, 2023*

Process log btree merge splicing in multiple commits. This prevents a
rare case where pending log merge completions contain more work than can
be done in a single server commit, causing the server to trigger an
assert shortly after starting.

Fix spurious EINVAL from data writes when data\_prealloc\_contig\_only was
set to 0.

---
v1.14
\
*Jun 29, 2023*

Add get\_referring\_entries ioctl for getting directory entries that
refer to an inode.

Fix excessive CPU use in the move\_blocks interface when moving a large
number of extents.

Reduce fragmented data allocation when contig\_only prealloc is not in
use by more consistently allocating multi-block extents within each
aligned prealloc region.

Avoid rare deadlock in metadata block cache reclaim under both heavy
load and memory pressure.

Fix crash when using quorum\_heartbeat\_timeout\_ms mount option.

---
v1.13
\
*May 19, 2023*

Add the quorum\_heartbeat\_timeout\_ms mount option to set the quorum
heartbeat timeout.

Change some task prioritization and allocation behavior of the quorum
agent to help reduce delays in sending and receiving heartbeat messages.

---
v1.12
\
*Apr 17, 2023*

Add the prepare-empty-data-device scoutfs command. A data device can be
unused when no files have data blocks, perhaps because they're archived
and offline. In this case the data device can be swapped out for
another device without changes to the metadata device.
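A hedged sketch of invoking the command; the argument form and device
path are assumptions for illustration only, so check the scoutfs
utility's own help output for the actual usage:

```shell
# Assumed invocation, for illustration only.
scoutfs prepare-empty-data-device /dev/new_data_dev
```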

Fix an oversight which limited inode timestamps to second granularity
for some operations. All operations now record timestamps with full
nanosecond precision.

Fix spurious ENOENT failures when renaming from other directories into
the root directory.

---
v1.11
\
*Feb 2, 2023*

Fixed a free extent processing error that could prevent mount from
proceeding when free data extents were sufficiently fragmented. It now
properly handles very fragmented free extent maps.

Fixed a statfs server processing race that could return spurious errors
and shut down the server. With the race closed statfs processing is
reliable.

Fixed a rare livelock in the move\_blocks ioctl. With the right
relationship between ioctl arguments and eventual file extent items the
core loop in the move\_blocks ioctl could get stuck looping on an extent
item and never return. The loop exit conditions were fixed and the loop
will always advance through all extents.

Changed the 'print' scoutfs commands to flush the block cache for the
devices. It was inconvenient to expect cache flushing to be a separate
step to ensure consistency with remote node writes.

---
v1.10
\
*Dec 7, 2022*

Fixed a potential directory entry cache management deadlock that could
occur when many nodes performed heavy metadata write loads across shared
directories and their child subdirectories. The deadlock could halt
invalidation progress on a node, which could then stop use of locks that
needed invalidation on that node, leaving almost all tasks hanging on
locks that would never make progress.

Fixed a circumstance where metadata change sequence index item
modification could leave behind old stale metadata sequence items. The
duplication case required concurrent metadata updates across mounts with
particular open transaction patterns so the duplicate items are rare.
They resulted in a small amount of additional load when walking change
indexes but had no effect on correctness.

Fixed a rare case where sparse file extension might not write partial
blocks of zeros which was found in testing. This required using
truncate to extend files past file sizes that end in partial blocks
along with the right transaction commit and memory reclaim patterns.
This never affected regular non-sparse files nor files prepopulated with
fallocate.

---
v1.9
\
*Oct 29, 2022*

Fix VFS cached directory entry consistency verification that could cause
spurious "no such file or directory" (ENOENT) errors from rename over
NFS under certain conditions. The problem was only ever with the
consistency of in-memory cached dentry objects; persistent data was
correct and eventual eviction of the bad cached objects would stop
generating the errors.

---
v1.8
\
*Oct 18, 2022*

Add support for Linux POSIX Access Control Lists, as described in
acl(5). Mount options are added to enable ("acl") and disable ("noacl")
support. The default is to support ACLs. ACLs are stored in the
existing extended attribute scheme so adding support does not require
a format change.
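Because the behavior follows standard acl(5) semantics, the usual
userspace tools apply. A minimal sketch, assuming a scoutfs filesystem
mounted at /mnt/scoutfs with ACLs enabled and an existing user named
alice (both placeholders):

```shell
# Grant an additional user read/write access and inspect the resulting ACL.
setfacl -m u:alice:rw /mnt/scoutfs/somefile
getfacl /mnt/scoutfs/somefile
```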

Add options to control data extent preallocation. The default behavior
does not change. The options can relax the limits on preallocation
which will then trigger under more write patterns and increase the risk
of preallocated space which is never used. The options are described in
scoutfs(5).

---
v1.7
\
*Aug 26, 2022*

* **Fixed possible persistent errors moving freed data extents**
  \
  Fixed a case where the server could hit persistent errors trying to
  move a client's freed extents in one commit. The client had to free
  a large number of extents that occupied distant positions in the
  global free extent btree. Very large fragmented files could cause
  this. The server now moves the freed extents in multiple commits and
  can always ensure forward progress.

* **Fixed possible persistent errors from freed duplicate extents**
  \
  Background orphan deletion wasn't properly synchronizing with
  foreground tasks deleting very large files. If a deletion took long
  enough then background deletion could also attempt to delete inode items
  while the deletion was making progress. This could create duplicate
  deletions of data extent items which causes the server to abort when
  it later discovers the duplicate extents as it merges free lists.

---
v1.6
\
*Jul 7, 2022*

* **Fix memory leaks in rare corner cases**
  \
  Analysis tools found a few corner cases that leaked small structures,
  generally around error handling or startup and shutdown.

* **Add --skip-likely-huge scoutfs print command option**
  \
  Add an option to scoutfs print to reduce the size of the output
  so that it can be used to see system-wide metadata without being
  overwhelmed by file-level details.
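A hedged sketch of using the option; the device arguments are
placeholders and the exact argument list is an assumption for
illustration rather than documented usage:

```shell
# Assumed invocation, for illustration only.
scoutfs print --skip-likely-huge /dev/meta_dev /dev/data_dev
```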

---
v1.5
\
*Jun 21, 2022*

* **Fix persistent error during server startup**
  \
  Fixed a case where the server would always hit a consistent error on
  startup, preventing the system from mounting. This required a rare
  but valid state across the clients.

* **Fix a client hang that would lead to fencing**
  \
  The client module's use of in-kernel networking was missing annotation
  that could lead to communication hanging. The server would fence the
  client when it stopped communicating. This could be identified by the
  server fencing a client after it disconnected with no attempt by the
  client to reconnect.

---
v1.4
\
*May 6, 2022*

* **Fix possible client crash during server failover**
  \
  Fixed a narrow window during server failover and lock recovery that
  could cause a client mount to believe that it had an inconsistent item
  cache and panic. This required very specific lock state and messaging
  patterns between multiple mounts and multiple servers which made it
  unlikely to occur in the field.

---
v1.3
\
*Apr 7, 2022*

* **Fix rare server instability under heavy load**
  \
  Fixed a case of server instability under heavy load due to concurrent
  work fully exhausting metadata block allocation pools reserved for a
  single server transaction. This would cause brief interruption as the
  server shut down and the next server started up and made progress as
  pending work was retried.

* **Fix slow fencing preventing server startup**
  \
  If a server had to process many fence requests with a slow fencing
  mechanism it could be interrupted before it finished. The server
  now makes sure heartbeat messages are sent while it is making progress
  on fencing requests so that other quorum members don't interrupt the
  process.

* **Performance improvement in getxattr and setxattr**
  \
  Kernel allocation patterns in the getxattr and setxattr
  implementations were causing significant contention between CPUs. Their
  allocation strategy was changed so that concurrent tasks can call these
  xattr methods without degrading performance.

---
v1.2
\
*Mar 14, 2022*

* **Fix deadlock between fallocate() and read() system calls**
  \
  Fixed a lock inversion that could cause two tasks to deadlock if they
  performed fallocate() and read() on a file at the same time. The
  deadlock was uninterruptible so the machine needed to be rebooted. This
  was relatively rare as fallocate() is usually used to prepare files
  before they're used.

* **Fix instability from heavy file deletion workloads**
  \
  Fixed rare circumstances under which background file deletion cleanup
  tasks could try to delete a file while it is being deleted by another
  task. Heavy load across multiple nodes, either many files being deleted
  or large files being deleted, increased the chances of this happening.
  Heavy staging could cause this problem because staging can create many
  internal temporary files that need to be deleted.

---
v1.1
\
*Feb 4, 2022*

* **Add scoutfs(1) change-quorum-config command**
  \
  Add a change-quorum-config command to scoutfs(1) to change the quorum
  configuration stored in the metadata device while the file system is
  unmounted. This can be used to change the mounts that will
  participate in quorum and the IP addresses they use.

* **Fix Rare Risk of Item Cache Corruption**
  \
  Code review found a rare potential source of item cache corruption.
  If this happened it would look as though deleted parts of the filesystem
  returned, but only at the time they were deleted. Old deleted items are
  not affected. This problem only affected the item cache, never
  persistent storage. Unmounting and remounting would drop the bad item
  cache and resync it with the correct persistent data.

---
v1.0
\
*Nov 8, 2021*

* **Initial Release**
  \
  Version 1.0 marks the first GA release.
@@ -12,17 +12,22 @@ else
SP = @:
endif

SCOUTFS_GIT_DESCRIBE := \
SCOUTFS_GIT_DESCRIBE ?= \
	$(shell git describe --all --abbrev=6 --long 2>/dev/null || \
	echo no-git)

ESCAPED_GIT_DESCRIBE := \
	$(shell echo $(SCOUTFS_GIT_DESCRIBE) |sed -e 's/\//\\\//g')

RPM_GITHASH ?= $(shell git rev-parse --short HEAD)

SCOUTFS_ARGS := SCOUTFS_GIT_DESCRIBE=$(SCOUTFS_GIT_DESCRIBE) \
	RPM_GITHASH=$(RPM_GITHASH) \
	CONFIG_SCOUTFS_FS=m -C $(SK_KSRC) M=$(CURDIR)/src \
	EXTRA_CFLAGS="-Werror"

# - We use the git describe from tags to set up the RPM versioning
RPM_VERSION := $(shell git describe --long --tags | awk -F '-' '{gsub(/^v/,""); print $$1}')
RPM_GITHASH := $(shell git rev-parse --short HEAD)
TARFILE = scoutfs-kmod-$(RPM_VERSION).tar

@@ -31,17 +36,18 @@ TARFILE = scoutfs-kmod-$(RPM_VERSION).tar
all: module

module:
	make $(SCOUTFS_ARGS)
	$(SP) make C=2 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)
	$(MAKE) $(SCOUTFS_ARGS)
	$(SP) $(MAKE) C=2 CF="-D__CHECK_ENDIAN__" $(SCOUTFS_ARGS)

modules_install:
	make $(SCOUTFS_ARGS) modules_install
	$(MAKE) $(SCOUTFS_ARGS) modules_install

%.spec: %.spec.in .FORCE
	sed -e 's/@@VERSION@@/$(RPM_VERSION)/g' \
		-e 's/@@GITHASH@@/$(RPM_GITHASH)/g' < $< > $@+
		-e 's/@@GITHASH@@/$(RPM_GITHASH)/g' \
		-e 's/@@GITDESCRIBE@@/$(ESCAPED_GIT_DESCRIBE)/g' < $< > $@+
	mv $@+ $@

@@ -50,4 +56,4 @@ dist: scoutfs-kmod.spec
	@ tar rf $(TARFILE) --transform="s@\(.*\)@scoutfs-kmod-$(RPM_VERSION)/\1@" scoutfs-kmod.spec

clean:
	make $(SCOUTFS_ARGS) clean
	$(MAKE) $(SCOUTFS_ARGS) clean
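As a note on the RPM_VERSION line above, a sketch of what that pipeline
produces, assuming a hypothetical tag history (the tag and hash shown
are made up for illustration):

```shell
git describe --long --tags    # e.g. v1.24-3-gabc1234
# awk -F '-' strips the leading "v" and keeps the first field, so RPM_VERSION=1.24
git rev-parse --short HEAD    # e.g. abc1234 (becomes RPM_GITHASH)
```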

@@ -1,18 +1,26 @@
%define kmod_name scoutfs
%define kmod_version @@VERSION@@
%define kmod_git_hash @@GITHASH@@
%define kmod_git_describe @@GITDESCRIBE@@
%define pkg_date %(date +%%Y%%m%%d)

# take kernel version or default to uname -r
%{!?kversion: %global kversion %(uname -r)}
%global kernel_version %{kversion}

%if 0%{?el7}
%global kernel_source() /usr/src/kernels/%{kernel_version}.$(arch)
%global kernel_release() %{kversion}
%else
%global kernel_source() /usr/src/kernels/%{kernel_version}
%endif

%{!?_release: %global _release 0.%{pkg_date}git%{kmod_git_hash}}

%if 0%{?el7}
Name: %{kmod_name}
%else
Name: kmod-%{kmod_name}
%endif
Summary: %{kmod_name} kernel module
Version: %{kmod_version}
Release: %{_release}%{?dist}
@@ -20,24 +28,42 @@ License: GPLv2
Group: System/Kernel
URL: http://scoutfs.org/

%if 0%{?el7}
BuildRequires: %{kernel_module_package_buildreqs}
BuildRequires: git
%else
BuildRequires: elfutils-libelf-devel
%endif
BuildRequires: kernel-devel-uname-r = %{kernel_version}
BuildRequires: git
BuildRequires: module-init-tools

ExclusiveArch: x86_64

Source: %{kmod_name}-kmod-%{kmod_version}.tar

%if 0%{?el7}
# Build only for standard kernel variant(s); for debug packages, append "debug"
# after "default" (separated by space)
%kernel_module_package default
%endif

# Disable the building of the debug package(s).
%define debug_package %{nil}
%global install_mod_dir extra/%{kmod_name}

%global install_mod_dir extra/%{name}
%if ! 0%{?el7}
%global flavors_to_build x86_64
%endif

# el9 sanity: make sure we lock to the minor release we built for and block upgrades
%{lua:
if string.match(rpm.expand("%{dist}"), "%.el9") then
rpm.define("el9 1")
end
}

%if 0%{?el9}
%define release_major_minor 9.%{lua: print(rpm.expand("%{dist}"):match("%.el9_(%d)"))}
Requires: system-release = %{release_major_minor}
%endif

%description
%{kmod_name} - kernel module
@@ -57,7 +83,7 @@ echo "Building for kernel: %{kernel_version} flavors: '%{flavors_to_build}'"
for flavor in %flavors_to_build; do
    rm -rf obj/$flavor
    cp -r source obj/$flavor
    make SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
    make RPM_GITHASH=%{kmod_git_hash} SCOUTFS_GIT_DESCRIBE=%{kmod_git_describe} SK_KSRC=%{kernel_source $flavor} -C obj/$flavor module
done

%install
@@ -66,7 +92,7 @@ export INSTALL_MOD_DIR=%{install_mod_dir}
mkdir -p %{install_mod_dir}
for flavor in %{flavors_to_build}; do
    export KSRC=%{kernel_source $flavor}
    export KVERSION=%{kernel_release $KSRC}
    export KVERSION=%{kversion}
    install -d $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}
    cp $PWD/obj/$flavor/src/scoutfs.ko $INSTALL_MOD_PATH/lib/modules/$KVERSION/%{install_mod_dir}/
done
@@ -74,7 +100,23 @@ done
# mark modules executable so that strip-to-file can strip them
find %{buildroot} -type f -name \*.ko -exec %{__chmod} u+x \{\} \;

%if ! 0%{?el7}
%files
/lib/modules

%post
echo /lib/modules/%{kversion}/%{install_mod_dir}/scoutfs.ko | weak-modules --add-modules --no-initramfs
depmod -a
%endif

%clean
rm -rf %{buildroot}

%preun
# stash our modules for postun cleanup
SCOUTFS_RPM_NAME=$(rpm -q %{name} | grep "%{version}-%{release}")
rpm -ql $SCOUTFS_RPM_NAME | grep '\.ko$' > /var/run/%{name}-modules-%{version}-%{release} || true

%postun
cat /var/run/%{name}-modules-%{version}-%{release} | weak-modules --remove-modules --no-initramfs
rm /var/run/%{name}-modules-%{version}-%{release} || true
@@ -8,6 +8,8 @@ CFLAGS_scoutfs_trace.o = -I$(src) # define_trace.h double include
-include $(src)/Makefile.kernelcompat

scoutfs-y += \
	acl.o \
	attr_x.o \
	avl.o \
	alloc.o \
	block.o \
@@ -18,18 +20,23 @@ scoutfs-y += \
	dir.o \
	export.o \
	ext.o \
	fence.o \
	file.o \
	forest.o \
	inode.o \
	ioctl.o \
	item.o \
	kernelcompat.o \
	lock.o \
	lock_server.o \
	msg.o \
	net.o \
	omap.o \
	options.o \
	per_task.o \
	quorum.o \
	quota.o \
	recov.o \
	scoutfs_trace.o \
	server.o \
	sort_priv.o \
@@ -37,9 +44,12 @@ scoutfs-y += \
	srch.o \
	super.o \
	sysfs.o \
	totl.o \
	trans.o \
	triggers.o \
	tseq.o \
	volopt.o \
	wkic.o \
	xattr.o

#
@@ -7,23 +7,13 @@
|
||||
ccflags-y += -include $(src)/kernelcompat.h
|
||||
|
||||
#
|
||||
# v3.10-rc6-21-gbb6f619b3a49
|
||||
# v3.18-rc2-19-gb5ae6b15bd73
|
||||
#
|
||||
# Folds d_materialise_unique into d_splice_alias. Note reversal
|
||||
# of arguments (Also note Documentation/filesystems/porting.rst)
|
||||
#
|
||||
# _readdir changes from fop->readdir() to fop->iterate() and from
|
||||
# filldir(dirent) to dir_emit(ctx).
|
||||
#
|
||||
ifneq (,$(shell grep 'iterate.*dir_context' include/linux/fs.h))
|
||||
ccflags-y += -DKC_ITERATE_DIR_CONTEXT
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.10-rc6-23-g5f99f4e79abc
|
||||
#
|
||||
# Helpers including dir_emit_dots() are added in the process of
|
||||
# switching dcache_readdir() from fop->readdir() to fop->iterate()
|
||||
#
|
||||
ifneq (,$(shell grep 'dir_emit_dots' include/linux/fs.h))
|
||||
ccflags-y += -DKC_DIR_EMIT_DOTS
|
||||
ifneq (,$(shell grep 'd_materialise_unique' include/linux/dcache.h))
|
||||
ccflags-y += -DKC_D_MATERIALISE_UNIQUE=1
|
||||
endif
|
||||
|
||||
#
|
||||
@@ -34,3 +24,413 @@ endif
|
||||
ifneq (,$(shell grep 'FMODE_KABI_ITERATE' include/linux/fs.h))
|
||||
ccflags-y += -DKC_FMODE_KABI_ITERATE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.7-rc2-23-g0d4d717f2583
|
||||
#
|
||||
# Added user_ns argument to posix_acl_valid
|
||||
#
|
||||
ifneq (,$(shell grep 'posix_acl_valid.*user_namespace' include/linux/posix_acl.h))
|
||||
ccflags-y += -DKC_POSIX_ACL_VALID_USER_NS
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.3-12296-g6d2052d188d9
|
||||
#
|
||||
# The RBCOMPUTE function is now passed an extra flag, and should return a bool
|
||||
# to indicate whether the propagated callback should stop or not.
|
||||
#
|
||||
ifneq (,$(shell grep 'static inline bool RBNAME.*_compute_max' include/linux/rbtree_augmented.h))
|
||||
ccflags-y += -DKC_RB_TREE_AUGMENTED_COMPUTE_MAX
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.13-25-g37bc15392a23
|
||||
#
|
||||
# Renames posix_acl_create to __posix_acl_create and provide some
|
||||
# new interfaces for creating ACLs
|
||||
#
|
||||
ifneq (,$(shell grep '__posix_acl_create' include/linux/posix_acl.h))
|
||||
ccflags-y += -DKC___POSIX_ACL_CREATE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.8-rc1-29-g31051c85b5e2
|
||||
#
|
||||
# inode_change_ok() removed - replace with setattr_prepare()
|
||||
# v5.11-rc4-7-g2f221d6f7b88 removes extern attribute
|
||||
#
|
||||
ifneq (,$(shell grep 'int setattr_prepare' include/linux/fs.h))
|
||||
ccflags-y += -DKC_SETATTR_PREPARE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.15-rc3-4-gae5e165d855d
|
||||
#
|
||||
# linux/iversion.h needs to manually be included for code that
|
||||
# manipulates this field.
|
||||
#
|
||||
ifneq (,$(shell grep -s 'define _LINUX_IVERSION_H' include/linux/iversion.h))
|
||||
ccflags-y += -DKC_NEED_LINUX_IVERSION_H=1
|
||||
endif
|
||||
|
||||
# v4.11-12447-g104b4e5139fe
|
||||
#
|
||||
# Renamed __percpu_counter_add to percpu_counter_add_batch to clarify
|
||||
# that the __ wasn't less safe, just took an extra parameter.
|
||||
#
|
||||
ifneq (,$(shell grep 'percpu_counter_add_batch' include/linux/percpu_counter.h))
|
||||
ccflags-y += -DKC_PERCPU_COUNTER_ADD_BATCH
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.11-4550-g7dea19f9ee63
|
||||
#
|
||||
# Introduced memalloc_nofs_{save,restore} preferred instead of _noio_.
|
||||
#
|
||||
ifneq (,$(shell grep 'memalloc_nofs_save' include/linux/sched/mm.h))
|
||||
ccflags-y += -DKC_MEMALLOC_NOFS_SAVE
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.7-12414-g1eff9d322a44
|
||||
#
|
||||
# Renamed bi_rw to bi_opf to force old code to catch up. We use it as a
|
||||
# single switch between old and new bio structures.
|
||||
#
|
||||
ifneq (,$(shell grep 'bi_opf' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_BIO_BI_OPF
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.12-rc2-201-g4e4cbee93d56
|
||||
#
|
||||
# Moves to bi_status BLK_STS_ API instead of having a mix of error
|
||||
# end_io args or bi_error.
|
||||
#
|
||||
ifneq (,$(shell grep 'bi_status' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_BIO_BI_STATUS
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.11-8765-ga0b02131c5fc
|
||||
#
|
||||
# Remove the old ->shrink() API, ->{scan,count}_objects is preferred.
|
||||
#
|
||||
ifneq (,$(shell grep '(*shrink)' include/linux/shrinker.h))
|
||||
ccflags-y += -DKC_SHRINKER_SHRINK
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.19-4777-g6bec00352861
|
||||
#
|
||||
# backing_dev_info is removed from address_space. Instead we need to use
|
||||
# inode_to_bdi() inline from <backing-dev.h>.
|
||||
#
|
||||
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_BACKING_DEV_INFO=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.3-9290-ge409de992e3e
|
||||
#
|
||||
# xattr handlers are now passed a struct that contains `flags`
|
||||
#
|
||||
ifneq (,$(shell grep 'int...get..const struct xattr_handler.*struct dentry.*dentry,' include/linux/xattr.h))
|
||||
ccflags-y += -DKC_XATTR_STRUCT_XATTR_HANDLER=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.16-rc1-1-g9b2c45d479d0
|
||||
#
|
||||
# kernel_getsockname() and kernel_getpeername dropped addrlen arg
|
||||
#
|
||||
ifneq (,$(shell grep 'kernel_getsockname.*,$$' include/linux/net.h))
|
||||
ccflags-y += -DKC_KERNEL_GETSOCKNAME_ADDRLEN=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.1-rc1-410-geeb1bd5c40ed
|
||||
#
|
||||
# Adds a struct net parameter to sock_create_kern
|
||||
#
|
||||
ifneq (,$(shell grep 'sock_create_kern.*struct net' include/linux/net.h))
|
||||
ccflags-y += -DKC_SOCK_CREATE_KERN_NET=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.18-rc6-1619-gc0371da6047a
|
||||
#
|
||||
# iov_iter is now part of struct msghdr
|
||||
#
|
||||
ifneq (,$(shell grep 'struct iov_iter.*msg_iter' include/linux/socket.h))
|
||||
ccflags-y += -DKC_MSGHDR_STRUCT_IOV_ITER=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.17-rc6-7-g95582b008388
|
||||
#
|
||||
# Kernel has current_time(inode) to uniformly retreive timespec in the right unit
|
||||
#
|
||||
ifneq (,$(shell grep 'struct timespec64 current_time' include/linux/fs.h))
|
||||
ccflags-y += -DKC_CURRENT_TIME_INODE=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.9-12228-g530e9b76ae8f
|
||||
#
|
||||
# register_cpu_notifier and family were all removed and to be
|
||||
# replaced with cpuhp_* API calls.
|
||||
#
|
||||
ifneq (,$(shell grep 'define register_hotcpu_notifier' include/linux/cpu.h))
|
||||
ccflags-y += -DKC_CPU_NOTIFIER
|
||||
endif
|
||||
|
||||
#
|
||||
# v3.14-rc8-130-gccad2365668f
|
||||
#
|
||||
# generic_file_buffered_write is removed, backport it
|
||||
#
|
||||
ifneq (,$(shell grep 'extern ssize_t generic_file_buffered_write' include/linux/fs.h))
|
||||
ccflags-y += -DKC_GENERIC_FILE_BUFFERED_WRITE=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.7-438-g8151b4c8bee4
|
||||
#
|
||||
# struct address_space_operations switches away from .readpages to .readahead
|
||||
#
|
||||
# RHEL has backported this feature all the way to RHEL8, as part of RHEL_KABI,
|
||||
# which means we need to detect this very precisely
|
||||
#
|
||||
ifneq (,$(shell grep 'readahead.*struct readahead_control' include/linux/fs.h))
|
||||
ccflags-y += -DKC_FILE_AOPS_READAHEAD
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.0-rc7-1743-g8436318205b9
|
||||
#
|
||||
# .aio_read and .aio_write no longer exist. All reads and writes now use the
|
||||
# .read_iter and .write_iter methods, or must implement .read and .write (which
|
||||
# we don't).
|
||||
#
|
||||
ifneq (,$(shell grep 'ssize_t.*aio_read' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_HAVE_FOP_AIO_READ=1
|
||||
endif
|
||||
|
||||
#
|
||||
# rhel7 has a custom inode_operations_wrapper struct that is discarded
|
||||
# entirely in favor of upstream structure since rhel8.
|
||||
#
|
||||
ifneq (,$(shell grep 'void.*follow_link.*struct dentry' include/linux/fs.h))
|
||||
ccflags-y += -DKC_LINUX_HAVE_RHEL_IOPS_WRAPPER=1
|
||||
endif
|
||||
|
||||
ifneq (,$(shell grep 'size_t.*ki_left;' include/linux/aio.h))
|
||||
ccflags-y += -DKC_LINUX_AIO_KI_LEFT=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v4.4-rc4-4-g98e9cb5711c6
|
||||
#
|
||||
# Introduces a new xattr_handler .name member that can be used to match the
|
||||
# entire field, instead of just a prefix. For these kernels, we must use
|
||||
# the new .name field instead.
|
||||
ifneq (,$(shell grep 'static inline const char .xattr_prefix' include/linux/xattr.h))
|
||||
ccflags-y += -DKC_XATTR_HANDLER_NAME=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.19-rc4-96-g342a72a33407
|
||||
#
|
||||
# Adds `typedef __u32 __bitwise blk_opf_t` to aid flag checking
|
||||
ifneq (,$(shell grep 'typedef __u32 __bitwise blk_opf_t' include/linux/blk_types.h))
|
||||
ccflags-y += -DKC_HAVE_BLK_OPF_T=1
|
||||
endif
|
||||
|
||||
#
|
||||
# v5.12-rc6-9-g4f0f586bf0c8
|
||||
#
|
||||
# list_sort cmp function takes const list_head args
|
||||
ifneq (,$(shell grep 'const struct list_head ., const struct list_head .' include/linux/list_sort.h))
|
||||
ccflags-y += -DKC_LIST_CMP_CONST_ARG_LIST_HEAD
|
||||
endif
|
||||
|
||||
# v5.7-523-g88dca4ca5a93
|
||||
#
|
||||
# The pgprot argument to vmalloc is always PAGE_KERNEL, so it is removed.
|
||||
ifneq (,$(shell grep 'extern void .__vmalloc.unsigned long size, gfp_t gfp_mask, pgprot_t prot' include/linux/vmalloc.h))
|
||||
ccflags-y += -DKC_VMALLOC_PGPROT_T
|
||||
endif
|
||||
|
||||
# v6.2-rc1-18-g01beba7957a2
|
||||
#
|
||||
# fs: port inode_owner_or_capable() to mnt_idmap
ifneq (,$(shell grep 'bool inode_owner_or_capable.struct user_namespace .mnt_userns' include/linux/fs.h))
ccflags-y += -DKC_INODE_OWNER_OR_CAPABLE_USERNS
endif

#
# v5.11-rc4-5-g47291baa8ddf
#
# namei: make permission helpers idmapped mount aware
ifneq (,$(shell grep 'int inode_permission.struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_INODE_PERMISSION_USERNS
endif

#
# v5.11-rc4-24-g549c7297717c
#
# fs: make helpers idmap mount aware
# Enlarges the VFS API methods to include a user namespace argument.
ifneq (,$(shell grep 'int ..mknod. .struct user_namespace' include/linux/fs.h))
ccflags-y += -DKC_VFS_METHOD_USER_NAMESPACE_ARG
endif

#
# v5.17-rc2-21-g07888c665b40
#
# Detect new style bio_alloc - pass bdev and opf.
ifneq (,$(shell grep 'struct bio .bio_alloc.struct block_device .bdev' include/linux/bio.h))
ccflags-y += -DKC_BIO_ALLOC_DEV_OPF_ARGS
endif

#
# v5.7-rc4-53-gcddf8a2c4a82
#
# fiemap_prep() replaces fiemap_check_flags()
ifneq (,$(shell grep -s 'int fiemap_prep.struct inode' include/linux/fiemap.h))
ccflags-y += -DKC_FIEMAP_PREP
endif

#
# v5.17-13043-g800ba29547e1
#
# generic_perform_write args use kiocb for passing filp and pos
ifneq (,$(shell grep 'ssize_t generic_perform_write.struct kiocb ., struct iov_iter' include/linux/fs.h))
ccflags-y += -DKC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
endif

#
# v5.7-rc6-2496-g76ee0785f42a
#
# net: add sock_set_sndtimeo
ifneq (,$(shell grep 'void sock_set_sndtimeo.struct sock' include/net/sock.h))
ccflags-y += -DKC_SOCK_SET_SNDTIMEO
endif

#
# v5.8-rc4-1931-gba423fdaa589
#
# setsockopt functions are now passed a sockptr_t value instead of char*
ifneq (,$(shell grep -s 'include .linux/sockptr.h.' include/linux/net.h))
ccflags-y += -DKC_SETSOCKOPT_SOCKPTR_T
endif

#
# v5.7-rc6-2507-g71c48eb81c9e
#
# Adds a bunch of low level TCP sock parameter functions that we want to use.
ifneq (,$(shell grep 'int tcp_sock_set_keepintvl' include/linux/tcp.h))
ccflags-y += -DKC_HAVE_TCP_SET_SOCKFN
endif

#
# v4.16-rc3-13-ga84d1169164b
#
# Fixes y2038 issues with struct timeval.
ifneq (,$(shell grep -s '^struct __kernel_old_timeval .' include/uapi/linux/time_types.h))
ccflags-y += -DKC_KERNEL_OLD_TIMEVAL_STRUCT
endif

#
# v5.19-rc4-52-ge33c267ab70d
#
# register_shrinker now requires a name, used for debug stats etc.
ifneq (,$(shell grep 'int __printf.*register_shrinker.struct shrinker .shrinker,' include/linux/shrinker.h))
ccflags-y += -DKC_SHRINKER_NAME
endif

#
# v5.18-rc5-246-gf132ab7d3ab0
#
# mpage_readpage() is now replaced with mpage_read_folio().
ifneq (,$(shell grep 'int mpage_read_folio.struct folio .folio' include/linux/mpage.h))
ccflags-y += -DKC_MPAGE_READ_FOLIO
endif

#
# v5.18-rc5-219-gb3992d1e2ebc
#
# block_write_begin() is no longer passed aop_flags
ifneq (,$(shell grep -C1 'int block_write_begin' include/linux/buffer_head.h | tail -n 2 | grep 'unsigned flags'))
ccflags-y += -DKC_BLOCK_WRITE_BEGIN_AOP_FLAGS
endif

#
# v6.0-rc6-9-g863f144f12ad
#
# the .tmpfile() vfs method calling convention changed and now a struct
# file* is passed to this method instead of a dentry. The function also
# should open the created file and call finish_open_simple() before returning.
ifneq (,$(shell grep 'extern void d_tmpfile.struct dentry' include/linux/dcache.h))
ccflags-y += -DKC_D_TMPFILE_DENTRY
endif

#
# v6.4-rc2-201-g0733ad800291
#
# New blk_mode_t replaces abuse of fmode_t
ifneq (,$(shell grep 'typedef unsigned int __bitwise blk_mode_t' include/linux/blkdev.h))
ccflags-y += -DKC_HAVE_BLK_MODE_T
endif

#
# v6.4-rc2-186-g2736e8eeb0cc
#
# Reworks FMODE_EXCL kludge and instead modifies the blkdev_put() call to pass in
# the (exclusive) holder to implement FMODE_EXCL handling.
ifneq (,$(shell grep 'blkdev_put.struct block_device .bdev, void .holder' include/linux/blkdev.h))
ccflags-y += -DKC_BLKDEV_PUT_HOLDER_ARG
endif

#
# v6.4-rc4-163-g0d625446d0a4
#
# Entirely removes current->backing_dev_info to ultimately remove buffer_head
# completely at some point.
ifneq (,$(shell grep 'struct backing_dev_info.*backing_dev_info;' include/linux/sched.h))
ccflags-y += -DKC_CURRENT_BACKING_DEV_INFO
endif

#
# v6.8-rc1-4-gf3a608827d1f
#
# adds bdev_file_open_by_path() and later in v6.8-rc1-30-ge97d06a46526 removes bdev_open_by_path()
# which requires us to use the file method from now on.
ifneq (,$(shell grep 'struct file.*bdev_file_open_by_path.const char.*path' include/linux/blkdev.h))
ccflags-y += -DKC_BDEV_FILE_OPEN_BY_PATH
endif

#
# v4.0-rc7-1796-gfe0f07d08ee3
#
# direct-io changes modify inode_dio_done to now be called inode_dio_end
ifneq (,$(shell grep 'void inode_dio_end.struct inode' include/linux/fs.h))
ccflags-y += -DKC_INODE_DIO_END
endif

#
# v5.0-6476-g3d3539018d2c
#
# page fault handlers return a bitmask vm_fault_t instead
# Note: el8's header has a slightly modified prefix here
ifneq (,$(shell grep 'typedef.*__bitwise unsigned.*int vm_fault_t' include/linux/mm_types.h))
ccflags-y += -DKC_MM_VM_FAULT_T
endif

#
# v3.19-499-gd83a08db5ba6
#
# .remap_pages becomes obsolete
ifneq (,$(shell grep 'int ..remap_pages..struct vm_area_struct' include/linux/mm.h))
ccflags-y += -DKC_MM_REMAP_PAGES
endif
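As an aside, the KC_ defines produced by these checks are consumed with preprocessor guards in the module sources. A minimal illustrative sketch (not taken from the scoutfs tree; the example_* names and the get_block callback are placeholders) of how one such flag typically selects between old and new kernel APIs:

#include <linux/mpage.h>

/* placeholder get_block callback, assumed only for this illustration */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

#ifdef KC_MPAGE_READ_FOLIO
/* v5.18+ kernels: address_space_operations carry .read_folio */
static int example_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, example_get_block);
}
#else
/* older kernels: the equivalent .readpage entry point */
static int example_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, example_get_block);
}
#endif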
377
kmod/src/acl.c
Normal file
@@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (C) 2022 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/posix_acl.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "xattr.h"
|
||||
#include "acl.h"
|
||||
#include "inode.h"
|
||||
#include "trans.h"
|
||||
|
||||
/*
|
||||
* POSIX draft ACLs are stored as full xattr items with the entries
|
||||
* encoded as the kernel's posix_acl_xattr_{header,entry} value structs.
|
||||
*
|
||||
* They're accessed and modified via user facing synthetic xattrs, iops
|
||||
* calls from the kernel, during inode mode changes, and during inode
|
||||
* creation.
|
||||
*
|
||||
* ACL access devolves into xattr access which is relatively expensive
|
||||
* so we maintain the cached native form in the vfs inode. We drop the
|
||||
* cache in lock invalidation which means that cached acl access must
|
||||
* always be performed under cluster locking.
|
||||
*/
|
||||
|
||||
static int acl_xattr_name_len(int type, char **name, size_t *name_len)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
*name = XATTR_NAME_POSIX_ACL_ACCESS;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
*name = XATTR_NAME_POSIX_ACL_DEFAULT;
|
||||
if (name_len)
|
||||
*name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
char *value = NULL;
|
||||
char *name;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
|
||||
acl = get_cached_acl(inode, type);
|
||||
if (acl != ACL_NOT_CACHED)
|
||||
return acl;
|
||||
#endif
|
||||
|
||||
ret = acl_xattr_name_len(type, &name, NULL);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = scoutfs_xattr_get_locked(inode, name, NULL, 0, lock);
|
||||
if (ret > 0) {
|
||||
value = kzalloc(ret, GFP_NOFS);
|
||||
if (!value)
|
||||
ret = -ENOMEM;
|
||||
else
|
||||
ret = scoutfs_xattr_get_locked(inode, name, value, ret, lock);
|
||||
}
|
||||
if (ret > 0) {
|
||||
acl = posix_acl_from_xattr(&init_user_ns, value, ret);
|
||||
} else if (ret == -ENODATA || ret == 0) {
|
||||
acl = NULL;
|
||||
} else {
|
||||
acl = ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/* can set null negative cache */
|
||||
if (!IS_ERR(acl))
|
||||
set_cached_acl(inode, type, acl);
|
||||
|
||||
kfree(value);
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
struct posix_acl *acl;
|
||||
int ret;
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
if (!IS_POSIXACL(inode))
|
||||
return NULL;
|
||||
#endif
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, 0, inode, &lock);
|
||||
if (ret < 0) {
|
||||
acl = ERR_PTR(ret);
|
||||
} else {
|
||||
acl = scoutfs_get_acl_locked(inode, type, lock);
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
|
||||
}
|
||||
|
||||
return acl;
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller has acquired the locks and dirtied the inode, they'll
|
||||
* update the inode item if we return 0.
|
||||
*/
|
||||
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
|
||||
struct scoutfs_lock *lock, struct list_head *ind_locks)
|
||||
{
|
||||
static const struct scoutfs_xattr_prefix_tags tgs = {0,}; /* never scoutfs. prefix */
|
||||
bool set_mode = false;
|
||||
char *value = NULL;
|
||||
umode_t new_mode;
|
||||
size_t name_len;
|
||||
char *name;
|
||||
int size = 0;
|
||||
int ret;
|
||||
|
||||
ret = acl_xattr_name_len(type, &name, &name_len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
if (acl) {
|
||||
ret = posix_acl_update_mode(KC_VFS_INIT_NS
|
||||
inode, &new_mode, &acl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
set_mode = true;
|
||||
}
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
if (!S_ISDIR(inode->i_mode)) {
|
||||
ret = acl ? -EINVAL : 0;
|
||||
goto out;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (acl) {
|
||||
size = posix_acl_xattr_size(acl->a_count);
|
||||
value = kmalloc(size, GFP_NOFS);
|
||||
if (!value) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_xattr_set_locked(inode, name, name_len, value, size, 0, &tgs,
|
||||
lock, NULL, ind_locks);
|
||||
if (ret == 0 && set_mode) {
|
||||
inode->i_mode = new_mode;
|
||||
if (!value) {
|
||||
/* can be setting an acl that only affects mode, didn't need xattr */
|
||||
inode_inc_iversion(inode);
|
||||
inode->i_ctime = current_time(inode);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
if (!ret)
|
||||
set_cached_acl(inode, type, acl);
|
||||
|
||||
kfree(value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
LIST_HEAD(ind_locks);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock) ?:
|
||||
scoutfs_inode_index_lock_hold(inode, &ind_locks, false, true);
|
||||
if (ret == 0) {
|
||||
ret = scoutfs_dirty_inode_item(inode, lock) ?:
|
||||
scoutfs_set_acl_locked(inode, acl, type, lock, &ind_locks);
|
||||
if (ret == 0)
|
||||
scoutfs_update_inode_item(inode, lock, &ind_locks);
|
||||
|
||||
scoutfs_release_trans(sb);
|
||||
scoutfs_inode_index_unlock(sb, &ind_locks);
|
||||
}
|
||||
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
|
||||
return ret;
|
||||
}
|
||||
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
|
||||
int scoutfs_acl_get_xattr(const struct xattr_handler *handler, struct dentry *dentry,
|
||||
struct inode *inode, const char *name, void *value,
|
||||
size_t size)
|
||||
{
|
||||
int type = handler->flags;
|
||||
#else
|
||||
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
|
||||
int type)
|
||||
{
|
||||
#endif
|
||||
struct posix_acl *acl;
|
||||
int ret = 0;
|
||||
|
||||
if (!IS_POSIXACL(dentry->d_inode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
acl = scoutfs_get_acl(dentry->d_inode, type);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
if (acl == NULL)
|
||||
return -ENODATA;
|
||||
|
||||
ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
|
||||
posix_acl_release(acl);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
|
||||
int scoutfs_acl_set_xattr(const struct xattr_handler *handler,
|
||||
KC_VFS_NS_DEF
|
||||
struct dentry *dentry,
|
||||
struct inode *inode, const char *name, const void *value,
|
||||
size_t size, int flags)
|
||||
{
|
||||
int type = handler->flags;
|
||||
#else
|
||||
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
|
||||
int flags, int type)
|
||||
{
|
||||
#endif
|
||||
struct posix_acl *acl = NULL;
|
||||
int ret;
|
||||
|
||||
if (!inode_owner_or_capable(KC_VFS_INIT_NS dentry->d_inode))
|
||||
return -EPERM;
|
||||
|
||||
if (!IS_POSIXACL(dentry->d_inode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (value) {
|
||||
acl = posix_acl_from_xattr(&init_user_ns, value, size);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
|
||||
if (acl) {
|
||||
ret = kc_posix_acl_valid(&init_user_ns, acl);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret = scoutfs_set_acl(dentry->d_inode, acl, type);
|
||||
out:
|
||||
posix_acl_release(acl);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Apply the parent's default acl to a new inode's access acl and inherit
|
||||
* it as the default for new directories. The caller holds locks and a
|
||||
* transaction.
|
||||
*/
|
||||
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
|
||||
struct list_head *ind_locks)
|
||||
{
|
||||
struct posix_acl *acl = NULL;
|
||||
int ret = 0;
|
||||
|
||||
if (!S_ISLNK(inode->i_mode)) {
|
||||
if (IS_POSIXACL(dir)) {
|
||||
acl = scoutfs_get_acl_locked(dir, ACL_TYPE_DEFAULT, dir_lock);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
}
|
||||
|
||||
if (!acl)
|
||||
inode->i_mode &= ~current_umask();
|
||||
}
|
||||
|
||||
if (IS_POSIXACL(dir) && acl) {
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_DEFAULT,
|
||||
lock, ind_locks);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
ret = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret > 0)
|
||||
ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS,
|
||||
lock, ind_locks);
|
||||
} else {
|
||||
cache_no_acl(inode);
|
||||
}
|
||||
out:
|
||||
posix_acl_release(acl);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the access ACL based on a newly set mode. If we return an
|
||||
* error then the xattr wasn't changed.
|
||||
*
|
||||
* Annoyingly, setattr_copy has logic that transforms the final set mode
|
||||
* that we want to use to update the acl. But we don't want to modify
|
||||
* the other inode fields while discovering the resulting mode. We're
|
||||
* relying on acl_chmod not caring about the transformation (currently
|
||||
* just clears sgid). It would be better if we could get the resulting
|
||||
* mode to give to acl_chmod without modifying the other inode fields.
|
||||
*
|
||||
* The caller has the inode mutex, a cluster lock, transaction, and will
|
||||
* update the inode item if we return success.
|
||||
*/
|
||||
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
|
||||
struct scoutfs_lock *lock, struct list_head *ind_locks)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
int ret = 0;
|
||||
|
||||
if (!IS_POSIXACL(inode) || !(attr->ia_valid & ATTR_MODE))
|
||||
return 0;
|
||||
|
||||
if (S_ISLNK(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
acl = scoutfs_get_acl_locked(inode, ACL_TYPE_ACCESS, lock);
|
||||
if (IS_ERR_OR_NULL(acl))
|
||||
return PTR_ERR(acl);
|
||||
|
||||
ret = __posix_acl_chmod(&acl, GFP_KERNEL, attr->ia_mode);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = scoutfs_set_acl_locked(inode, acl, ACL_TYPE_ACCESS, lock, ind_locks);
|
||||
posix_acl_release(acl);
|
||||
return ret;
|
||||
}
|
||||
29
kmod/src/acl.h
Normal file
@@ -0,0 +1,29 @@
|
||||
#ifndef _SCOUTFS_ACL_H_
|
||||
#define _SCOUTFS_ACL_H_
|
||||
|
||||
struct posix_acl *scoutfs_get_acl(struct inode *inode, int type);
|
||||
struct posix_acl *scoutfs_get_acl_locked(struct inode *inode, int type, struct scoutfs_lock *lock);
|
||||
int scoutfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
||||
int scoutfs_set_acl_locked(struct inode *inode, struct posix_acl *acl, int type,
|
||||
struct scoutfs_lock *lock, struct list_head *ind_locks);
|
||||
#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
|
||||
int scoutfs_acl_get_xattr(const struct xattr_handler *, struct dentry *dentry,
|
||||
struct inode *inode, const char *name, void *value,
|
||||
size_t size);
|
||||
int scoutfs_acl_set_xattr(const struct xattr_handler *,
|
||||
KC_VFS_NS_DEF
|
||||
struct dentry *dentry,
|
||||
struct inode *inode, const char *name, const void *value,
|
||||
size_t size, int flags);
|
||||
#else
|
||||
int scoutfs_acl_get_xattr(struct dentry *dentry, const char *name, void *value, size_t size,
|
||||
int type);
|
||||
int scoutfs_acl_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size,
|
||||
int flags, int type);
|
||||
#endif
|
||||
int scoutfs_acl_chmod_locked(struct inode *inode, struct iattr *attr,
|
||||
struct scoutfs_lock *lock, struct list_head *ind_locks);
|
||||
int scoutfs_init_acl_locked(struct inode *inode, struct inode *dir,
|
||||
struct scoutfs_lock *lock, struct scoutfs_lock *dir_lock,
|
||||
struct list_head *ind_locks);
|
||||
#endif
|
||||
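These prototypes match the kernel's struct xattr_handler callbacks when KC_XATTR_STRUCT_XATTR_HANDLER is set. A hypothetical sketch of how such a handler could be wired up (the table entry name is made up; the actual scoutfs registration is not shown in this diff):

#ifdef KC_XATTR_STRUCT_XATTR_HANDLER
/* illustrative only: routes the POSIX access ACL xattr to the helpers above */
static const struct xattr_handler example_acl_access_handler = {
	.name	= XATTR_NAME_POSIX_ACL_ACCESS,
	.flags	= ACL_TYPE_ACCESS,	/* read back as handler->flags in the helpers */
	.get	= scoutfs_acl_get_xattr,
	.set	= scoutfs_acl_set_xattr,
};
#endif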
638
kmod/src/alloc.c
@@ -14,6 +14,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/random.h>
|
||||
|
||||
@@ -29,8 +30,8 @@
|
||||
* The core allocator uses extent items in btrees rooted in the super.
|
||||
* Each free extent is stored in two items. The first item is indexed
|
||||
* by block location and is used to merge adjacent extents when freeing.
|
||||
* The second item is indexed by length and is used to find large
|
||||
* extents to allocate from.
|
||||
* The second item is indexed by the order of the length and is used to
|
||||
* find large extents to allocate from.
|
||||
*
|
||||
* Free extent always consumes the front of the largest extent. This
|
||||
* attempts to discourage fragmentation by giving smaller freed extents
|
||||
@@ -67,25 +68,67 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* Free extents don't have flags and are stored in two indexes sorted by
|
||||
* block location and by length, largest first. The block location key
|
||||
* is set to the final block in the extent so that we can find
|
||||
* intersections by calling _next() iterators starting with the block
|
||||
* we're searching for.
|
||||
* Return the order of the length of a free extent, which we define as
|
||||
* floor(log_8(len)): 0..7 = 0, 8..63 = 1, etc.
|
||||
*/
|
||||
static void init_ext_key(struct scoutfs_key *key, int type, u64 start, u64 len)
|
||||
static u64 free_extent_order(u64 len)
|
||||
{
|
||||
return (fls64(len | 1) - 1) / 3;
|
||||
}
|
||||
|
||||
/*
|
||||
* The smallest (non-zero) length that will be mapped to the same order
|
||||
* as the given length.
|
||||
*/
|
||||
static u64 smallest_order_length(u64 len)
|
||||
{
|
||||
return 1ULL << (free_extent_order(len) * 3);
|
||||
}
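/*
 * Worked example of the order mapping above (added for illustration):
 *   len 1..7    -> order 0, smallest_order_length() == 1
 *   len 8..63   -> order 1, smallest_order_length() == 8
 *   len 64..511 -> order 2, smallest_order_length() == 64
 * e.g. free_extent_order(100) == (fls64(100) - 1) / 3 == (7 - 1) / 3 == 2.
 */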
|
||||
|
||||
/*
|
||||
* An extent modification dirties three distinct leaves of an allocator
|
||||
* btree as it adds and removes the blkno and size sorted items for the
|
||||
* old and new lengths of the extent. Dirtying the paths to these
|
||||
* leaves can grow the tree and grow/shrink neighbours at each level.
|
||||
* We over-estimate the number of blocks allocated and freed (the paths
|
||||
* share a root, growth doesn't free) to err on the simpler and safer
|
||||
* side. The overhead is minimal given the relatively large list blocks
|
||||
* and relatively short allocator trees.
|
||||
*/
|
||||
static u32 extent_mod_blocks(u32 height)
|
||||
{
|
||||
return ((1 + height) * 2) * 3;
|
||||
}
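/*
 * Example of the estimate above (added for illustration): a btree of
 * height 3 budgets ((1 + 3) * 2) * 3 == 24 blocks per extent
 * modification.
 */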
|
||||
|
||||
/*
|
||||
* Free extents don't have flags and are stored in two indexes sorted by
|
||||
* block location and by length order, largest first. The location key
|
||||
* field is set to the final block in the extent so that we can find
|
||||
* intersections by calling _next() with the start of the range we're
|
||||
* searching for.
|
||||
*
|
||||
* We never store 0 length extents but we do build keys for searching
|
||||
* the order index from 0,0 without having to map it to a real extent.
|
||||
*/
|
||||
static void init_ext_key(struct scoutfs_key *key, int zone, u64 start, u64 len)
|
||||
{
|
||||
*key = (struct scoutfs_key) {
|
||||
.sk_zone = SCOUTFS_FREE_EXTENT_ZONE,
|
||||
.sk_type = type,
|
||||
.sk_zone = zone,
|
||||
};
|
||||
|
||||
if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
|
||||
if (len == 0) {
|
||||
/* we only use 0 len extents for magic 0,0 order lookups */
|
||||
WARN_ON_ONCE(zone != SCOUTFS_FREE_EXTENT_ORDER_ZONE || start != 0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
|
||||
key->skfb_end = cpu_to_le64(start + len - 1);
|
||||
key->skfb_len = cpu_to_le64(len);
|
||||
} else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE) {
|
||||
key->skfl_neglen = cpu_to_le64(-len);
|
||||
key->skfl_blkno = cpu_to_le64(start);
|
||||
} else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE) {
|
||||
key->skfo_revord = cpu_to_le64(U64_MAX - free_extent_order(len));
|
||||
key->skfo_end = cpu_to_le64(start + len - 1);
|
||||
key->skfo_len = cpu_to_le64(len);
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
@@ -93,23 +136,27 @@ static void init_ext_key(struct scoutfs_key *key, int type, u64 start, u64 len)
|
||||
|
||||
static void ext_from_key(struct scoutfs_extent *ext, struct scoutfs_key *key)
|
||||
{
|
||||
if (key->sk_type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE) {
|
||||
if (key->sk_zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
|
||||
ext->start = le64_to_cpu(key->skfb_end) -
|
||||
le64_to_cpu(key->skfb_len) + 1;
|
||||
ext->len = le64_to_cpu(key->skfb_len);
|
||||
} else {
|
||||
ext->start = le64_to_cpu(key->skfl_blkno);
|
||||
ext->len = -le64_to_cpu(key->skfl_neglen);
|
||||
ext->start = le64_to_cpu(key->skfo_end) -
|
||||
le64_to_cpu(key->skfo_len) + 1;
|
||||
ext->len = le64_to_cpu(key->skfo_len);
|
||||
}
|
||||
ext->map = 0;
|
||||
ext->flags = 0;
|
||||
|
||||
/* we never store 0 length extents */
|
||||
WARN_ON_ONCE(ext->len == 0);
|
||||
}
|
||||
|
||||
struct alloc_ext_args {
|
||||
struct scoutfs_alloc *alloc;
|
||||
struct scoutfs_block_writer *wri;
|
||||
struct scoutfs_alloc_root *root;
|
||||
int type;
|
||||
int zone;
|
||||
};
|
||||
|
||||
static int alloc_ext_next(struct super_block *sb, void *arg,
|
||||
@@ -120,13 +167,13 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
init_ext_key(&key, args->type, start, len);
|
||||
init_ext_key(&key, args->zone, start, len);
|
||||
|
||||
ret = scoutfs_btree_next(sb, &args->root->root, &key, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.val_len != 0)
|
||||
ret = -EIO;
|
||||
else if (iref.key->sk_type != args->type)
|
||||
else if (iref.key->sk_zone != args->zone)
|
||||
ret = -ENOENT;
|
||||
else
|
||||
ext_from_key(ext, iref.key);
|
||||
@@ -139,19 +186,19 @@ static int alloc_ext_next(struct super_block *sb, void *arg,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int other_type(int type)
|
||||
static int other_zone(int zone)
|
||||
{
|
||||
if (type == SCOUTFS_FREE_EXTENT_BLKNO_TYPE)
|
||||
return SCOUTFS_FREE_EXTENT_LEN_TYPE;
|
||||
else if (type == SCOUTFS_FREE_EXTENT_LEN_TYPE)
|
||||
return SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
|
||||
if (zone == SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
|
||||
return SCOUTFS_FREE_EXTENT_ORDER_ZONE;
|
||||
else if (zone == SCOUTFS_FREE_EXTENT_ORDER_ZONE)
|
||||
return SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
|
||||
else
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert an extent along with its matching item which is indexed by
|
||||
* opposite of its len or blkno. If we succeed we update the root's
|
||||
* opposite of its order or blkno. If we succeed we update the root's
|
||||
* record of the total length of all the stored extents.
|
||||
*/
|
||||
static int alloc_ext_insert(struct super_block *sb, void *arg,
|
||||
@@ -167,8 +214,8 @@ static int alloc_ext_insert(struct super_block *sb, void *arg,
|
||||
if (WARN_ON_ONCE(map || flags))
|
||||
return -EINVAL;
|
||||
|
||||
init_ext_key(&key, args->type, start, len);
|
||||
init_ext_key(&other, other_type(args->type), start, len);
|
||||
init_ext_key(&key, args->zone, start, len);
|
||||
init_ext_key(&other, other_zone(args->zone), start, len);
|
||||
|
||||
ret = scoutfs_btree_insert(sb, args->alloc, args->wri,
|
||||
&args->root->root, &key, NULL, 0);
|
||||
@@ -196,8 +243,8 @@ static int alloc_ext_remove(struct super_block *sb, void *arg,
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
init_ext_key(&key, args->type, start, len);
|
||||
init_ext_key(&other, other_type(args->type), start, len);
|
||||
init_ext_key(&key, args->zone, start, len);
|
||||
init_ext_key(&other, other_zone(args->zone), start, len);
|
||||
|
||||
ret = scoutfs_btree_delete(sb, args->alloc, args->wri,
|
||||
&args->root->root, &key);
|
||||
@@ -221,6 +268,7 @@ static struct scoutfs_ext_ops alloc_ext_ops = {
|
||||
.next = alloc_ext_next,
|
||||
.insert = alloc_ext_insert,
|
||||
.remove = alloc_ext_remove,
|
||||
.insert_overlap_warn = true,
|
||||
};
|
||||
|
||||
static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)
|
||||
@@ -230,20 +278,17 @@ static bool invalid_extent(u64 start, u64 end, u64 first, u64 last)
|
||||
|
||||
static bool invalid_meta_blkno(struct super_block *sb, u64 blkno)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
u64 last_meta = (i_size_read(sbi->meta_bdev->bd_inode) >> SCOUTFS_BLOCK_LG_SHIFT) - 1;
|
||||
|
||||
return invalid_extent(blkno, blkno,
|
||||
le64_to_cpu(super->first_meta_blkno),
|
||||
le64_to_cpu(super->last_meta_blkno));
|
||||
return invalid_extent(blkno, blkno, SCOUTFS_META_DEV_START_BLKNO, last_meta);
|
||||
}
|
||||
|
||||
static bool invalid_data_extent(struct super_block *sb, u64 start, u64 len)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
u64 last_data = (i_size_read(sb->s_bdev->bd_inode) >> SCOUTFS_BLOCK_SM_SHIFT) - 1;
|
||||
|
||||
return invalid_extent(start, start + len - 1,
|
||||
le64_to_cpu(super->first_data_blkno),
|
||||
le64_to_cpu(super->last_data_blkno));
|
||||
return invalid_extent(start, start + len - 1, SCOUTFS_DATA_DEV_START_BLKNO, last_data);
|
||||
}
|
||||
|
||||
void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
|
||||
@@ -619,7 +664,7 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = &dalloc->root,
|
||||
.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
|
||||
.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
|
||||
};
|
||||
int ret = 0;
|
||||
|
||||
@@ -645,6 +690,14 @@ int scoutfs_dalloc_return_cached(struct super_block *sb,
|
||||
*
|
||||
* Unlike meta allocations, the caller is expected to serialize
|
||||
* allocations from the root.
|
||||
*
|
||||
* ENOBUFS is returned if the data allocator ran out of space and we can
|
||||
* probably refill it from the server. The caller is expected to back
|
||||
* out, commit the transaction, and try again.
|
||||
*
|
||||
* ENOSPC is returned if the data allocator ran out of space but we have
|
||||
* a flag from the server telling us that there's no more space
|
||||
* available. This is a hard error and should be returned.
|
||||
*/
|
||||
int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
@@ -655,7 +708,7 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = &dalloc->root,
|
||||
.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
|
||||
.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
|
||||
};
|
||||
struct scoutfs_extent ext;
|
||||
u64 len;
|
||||
@@ -693,13 +746,13 @@ int scoutfs_alloc_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Special retval meaning there wasn't space to alloc from
|
||||
* this txn. Doesn't mean filesystem is completely full.
|
||||
* Maybe upper layers want to try again.
|
||||
*/
|
||||
if (ret == -ENOENT)
|
||||
ret = -ENOBUFS;
|
||||
if (ret == -ENOENT) {
|
||||
if (le32_to_cpu(dalloc->root.flags) & SCOUTFS_ALLOC_FLAG_LOW)
|
||||
ret = -ENOSPC;
|
||||
else
|
||||
ret = -ENOBUFS;
|
||||
}
|
||||
|
||||
*blkno_ret = 0;
|
||||
*count_ret = 0;
|
||||
} else {
|
||||
@@ -728,7 +781,7 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
|
||||
.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
|
||||
};
|
||||
int ret;
|
||||
|
||||
@@ -741,6 +794,95 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the first zone bit that the extent intersects with.
|
||||
*/
|
||||
static int first_extent_zone(struct scoutfs_extent *ext, __le64 *zones, u64 zone_blocks)
|
||||
{
|
||||
int first;
|
||||
int last;
|
||||
int nr;
|
||||
|
||||
first = div64_u64(ext->start, zone_blocks);
|
||||
last = div64_u64(ext->start + ext->len - 1, zone_blocks);
|
||||
|
||||
nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, first);
|
||||
if (nr <= last)
|
||||
return nr;
|
||||
|
||||
return SCOUTFS_DATA_ALLOC_MAX_ZONES;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find an extent in specific zones to satisfy an allocation. We use
|
||||
* the order items to search for the largest extent that intersects with
|
||||
* the zones whose bits are set in the caller's bitmap.
|
||||
*/
|
||||
static int find_zone_extent(struct super_block *sb, struct scoutfs_alloc_root *root,
|
||||
__le64 *zones, u64 zone_blocks,
|
||||
struct scoutfs_extent *found_ret, u64 count,
|
||||
struct scoutfs_extent *ext_ret)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.root = root,
|
||||
.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
|
||||
};
|
||||
struct scoutfs_extent found;
|
||||
struct scoutfs_extent ext;
|
||||
u64 start;
|
||||
u64 len;
|
||||
int nr;
|
||||
int ret;
|
||||
|
||||
/* don't bother when there are no bits set */
|
||||
if (find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0) ==
|
||||
SCOUTFS_DATA_ALLOC_MAX_ZONES)
|
||||
return -ENOENT;
|
||||
|
||||
/* start searching for largest extent from the first zone */
|
||||
len = smallest_order_length(SCOUTFS_BLOCK_SM_MAX);
|
||||
nr = 0;
|
||||
|
||||
for (;;) {
|
||||
/* search for extents in the next zone at our order */
|
||||
nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, nr);
|
||||
if (nr >= SCOUTFS_DATA_ALLOC_MAX_ZONES) {
|
||||
/* wrap down to next smaller order if we run out of bits */
|
||||
len >>= 3;
|
||||
if (len == 0) {
|
||||
ret = -ENOENT;
|
||||
break;
|
||||
}
|
||||
nr = find_next_bit_le(zones, SCOUTFS_DATA_ALLOC_MAX_ZONES, 0);
|
||||
}
|
||||
|
||||
start = (u64)nr * zone_blocks;
|
||||
|
||||
ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, start, len, &found);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
/* see if the next extent intersects any zones */
|
||||
nr = first_extent_zone(&found, zones, zone_blocks);
|
||||
if (nr < SCOUTFS_DATA_ALLOC_MAX_ZONES) {
|
||||
start = (u64)nr * zone_blocks;
|
||||
|
||||
ext.start = max(start, found.start);
|
||||
ext.len = min(count, found.start + found.len - ext.start);
|
||||
|
||||
*found_ret = found;
|
||||
*ext_ret = ext;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* continue searching past extent */
|
||||
nr = div64_u64(found.start + found.len - 1, zone_blocks) + 1;
|
||||
len = smallest_order_length(found.len);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Move extent items adding up to the requested total length from the
|
||||
@@ -751,6 +893,18 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
* -ENOENT is returned if we run out of extents in the source tree
|
||||
* before moving the total.
|
||||
*
|
||||
* If meta_budget is non-zero then -EINPROGRESS can be returned if the
* caller's budget is consumed in the allocator during this call
* (though not necessarily by us, we don't have per-thread tracking of
* allocator consumption :/). The call can still have made progress and
* the caller is expected to commit the dirty trees and examine the
* resulting modified trees to see if they need to continue moving extents.
|
||||
*
|
||||
* The caller can specify that extents in the source tree should first
|
||||
* be found based on their zone bitmaps. We'll first try to find
|
||||
* extents in the exclusive zones, then vacant zones, and then we'll
|
||||
* fall back to normal allocation that ignores zones.
|
||||
*
|
||||
* This first pass is not optimal because it performs full btree walks
|
||||
* per extent. We could optimize this with more clever btree item
|
||||
* manipulation functions which can iterate through src and dst blocks
|
||||
@@ -759,32 +913,100 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_alloc_root *dst,
|
||||
struct scoutfs_alloc_root *src, u64 total)
|
||||
struct scoutfs_alloc_root *src, u64 total,
|
||||
__le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
};
|
||||
struct scoutfs_extent found;
|
||||
struct scoutfs_extent ext;
|
||||
u32 avail_start = 0;
|
||||
u32 freed_start = 0;
|
||||
u64 moved = 0;
|
||||
u64 count;
|
||||
int ret = 0;
|
||||
int err;
|
||||
|
||||
if (zone_blocks == 0) {
|
||||
exclusive = NULL;
|
||||
vacant = NULL;
|
||||
}
|
||||
|
||||
if (meta_budget != 0)
|
||||
scoutfs_alloc_meta_remaining(alloc, &avail_start, &freed_start);
|
||||
|
||||
while (moved < total) {
|
||||
args.root = src;
|
||||
args.type = SCOUTFS_FREE_EXTENT_LEN_TYPE;
|
||||
ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args,
|
||||
0, 0, total - moved, &ext);
|
||||
count = total - moved;
|
||||
|
||||
if (exclusive) {
|
||||
/* first try to find extents in our exclusive zones */
|
||||
ret = find_zone_extent(sb, src, exclusive, zone_blocks,
|
||||
&found, count, &ext);
|
||||
if (ret == -ENOENT) {
|
||||
exclusive = NULL;
|
||||
continue;
|
||||
}
|
||||
} else if (vacant) {
|
||||
/* then try to find extents in vacant zones */
|
||||
ret = find_zone_extent(sb, src, vacant, zone_blocks,
|
||||
&found, count, &ext);
|
||||
if (ret == -ENOENT) {
|
||||
vacant = NULL;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
/* otherwise fall back to finding extents anywhere */
|
||||
args.root = src;
|
||||
args.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE;
|
||||
ret = scoutfs_ext_next(sb, &alloc_ext_ops, &args, 0, 0, &found);
|
||||
if (ret == 0) {
|
||||
ext.start = found.start;
|
||||
ext.len = min(count, found.len);
|
||||
}
|
||||
}
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (meta_budget != 0 &&
|
||||
scoutfs_alloc_meta_low_since(alloc, avail_start, freed_start, meta_budget,
|
||||
extent_mod_blocks(src->root.height) +
|
||||
extent_mod_blocks(dst->root.height))) {
|
||||
ret = -EINPROGRESS;
|
||||
break;
|
||||
}
|
||||
|
||||
/* return partial if the server alloc can't dirty any more */
|
||||
if (scoutfs_alloc_meta_low(sb, alloc, 50 + extent_mod_blocks(src->root.height) +
|
||||
extent_mod_blocks(dst->root.height))) {
|
||||
if (WARN_ON_ONCE(!moved))
|
||||
ret = -ENOSPC;
|
||||
else
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* searching set start/len, finish initializing alloced extent */
|
||||
ext.map = found.map ? ext.start - found.start + found.map : 0;
|
||||
ext.flags = found.flags;
|
||||
|
||||
/* remove the allocation from the found extent */
|
||||
args.root = src;
|
||||
args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
|
||||
ret = scoutfs_ext_remove(sb, &alloc_ext_ops, &args, ext.start, ext.len);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
/* insert the allocated extent into the dest */
|
||||
args.root = dst;
|
||||
args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
|
||||
args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
|
||||
ret = scoutfs_ext_insert(sb, &alloc_ext_ops, &args, ext.start,
|
||||
ext.len, ext.map, ext.flags);
|
||||
if (ret < 0) {
|
||||
/* and put it back in src if insertion failed */
|
||||
args.root = src;
|
||||
args.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE;
|
||||
args.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE;
|
||||
err = scoutfs_ext_insert(sb, &alloc_ext_ops, &args,
|
||||
ext.start, ext.len, ext.map,
|
||||
ext.flags);
|
||||
@@ -794,6 +1016,8 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
|
||||
moved += ext.len;
|
||||
scoutfs_inc_counter(sb, alloc_moved_extent);
|
||||
|
||||
trace_scoutfs_alloc_move_extent(sb, &ext);
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, alloc_move);
|
||||
@@ -802,6 +1026,39 @@ int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add new free space to an allocator. _ext_insert will make sure that it doesn't
|
||||
* overlap with any existing extents. This is done by the server in a transaction that
|
||||
* also updates total_*_blocks in the super so we don't verify.
|
||||
*/
|
||||
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
|
||||
u64 start, u64 len)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
|
||||
};
|
||||
|
||||
return scoutfs_ext_insert(sb, &alloc_ext_ops, &args, start, len, 0, 0);
|
||||
}
|
||||
|
||||
int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
|
||||
u64 start, u64 len)
|
||||
{
|
||||
struct alloc_ext_args args = {
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
|
||||
};
|
||||
|
||||
return scoutfs_ext_remove(sb, &alloc_ext_ops, &args, start, len);
|
||||
}
|
||||
|
||||
/*
|
||||
* We only trim one block, instead of looping trimming all, because the
|
||||
* caller is assuming that we do a fixed amount of work when they check
|
||||
@@ -848,18 +1105,22 @@ out:
|
||||
}
|
||||
|
||||
/*
|
||||
* True if the allocator has enough free blocks to cow (alloc and free)
|
||||
* a list block and all the btree blocks that store extent items.
|
||||
* True if the allocator has enough blocks in the avail list and space
|
||||
* in the freed list to be able to perform the caller's operations. If
|
||||
* false the caller should back off and return partial progress rather
|
||||
* than completely exhausting the avail list or overflowing the freed
|
||||
* list.
|
||||
*
|
||||
* At most, an extent operation can dirty down three paths of the tree
|
||||
* to modify a blkno item and two distant len items. We can grow and
|
||||
* split the root, and then those three paths could share blocks but each
|
||||
* modify two leaf blocks.
|
||||
* The caller tells us how many extents they're about to modify and how
|
||||
* many other additional blocks they may cow manually. And finally, the
|
||||
* caller could be the first to dirty the avail and freed blocks in the
|
||||
* allocator,
|
||||
*/
|
||||
static bool list_can_cow(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_alloc_root *root)
|
||||
static bool list_has_blocks(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_alloc_root *root, u32 extents, u32 addl_blocks)
|
||||
{
|
||||
u32 most = 1 + (1 + 1 + (3 * (1 - root->root.height + 1)));
|
||||
u32 tree_blocks = extent_mod_blocks(root->root.height) * extents;
|
||||
u32 most = 1 + tree_blocks + addl_blocks;
|
||||
|
||||
if (le32_to_cpu(alloc->avail.first_nr) < most) {
|
||||
scoutfs_inc_counter(sb, alloc_list_avail_lo);
|
||||
@@ -901,7 +1162,7 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.type = SCOUTFS_FREE_EXTENT_LEN_TYPE,
|
||||
.zone = SCOUTFS_FREE_EXTENT_ORDER_ZONE,
|
||||
};
|
||||
struct scoutfs_alloc_list_block *lblk;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
@@ -923,8 +1184,7 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
|
||||
goto out;
|
||||
lblk = bl->data;
|
||||
|
||||
while (le32_to_cpu(lblk->nr) < target &&
|
||||
list_can_cow(sb, alloc, root)) {
|
||||
while (le32_to_cpu(lblk->nr) < target && list_has_blocks(sb, alloc, root, 1, 0)) {
|
||||
|
||||
ret = scoutfs_ext_alloc(sb, &alloc_ext_ops, &args, 0, 0,
|
||||
target - le32_to_cpu(lblk->nr), &ext);
|
||||
@@ -936,6 +1196,8 @@ int scoutfs_alloc_fill_list(struct super_block *sb,
|
||||
|
||||
for (i = 0; i < ext.len; i++)
|
||||
list_block_add(lhead, lblk, ext.start + i);
|
||||
|
||||
trace_scoutfs_alloc_fill_extent(sb, &ext);
|
||||
}
|
||||
|
||||
out:
|
||||
@@ -958,7 +1220,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
|
||||
.alloc = alloc,
|
||||
.wri = wri,
|
||||
.root = root,
|
||||
.type = SCOUTFS_FREE_EXTENT_BLKNO_TYPE,
|
||||
.zone = SCOUTFS_FREE_EXTENT_BLKNO_ZONE,
|
||||
};
|
||||
struct scoutfs_alloc_list_block *lblk = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
@@ -968,7 +1230,7 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(lhead_in_alloc(alloc, lhead)))
|
||||
return -EINVAL;
|
||||
|
||||
while (lhead->ref.blkno && list_can_cow(sb, alloc, args.root)) {
|
||||
while (lhead->ref.blkno && list_has_blocks(sb, alloc, args.root, 1, 1)) {
|
||||
|
||||
if (lhead->first_nr == 0) {
|
||||
ret = trim_empty_first_block(sb, alloc, wri, lhead);
|
||||
@@ -1004,6 +1266,8 @@ int scoutfs_alloc_empty_list(struct super_block *sb,
|
||||
break;
|
||||
|
||||
list_block_remove(lhead, lblk, ext.len);
|
||||
|
||||
trace_scoutfs_alloc_empty_extent(sb, &ext);
|
||||
}
|
||||
|
||||
scoutfs_block_put(sb, bl);
|
||||
@@ -1091,37 +1355,82 @@ bool scoutfs_alloc_meta_low(struct super_block *sb,
|
||||
return lo;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call the callers callback for every persistent allocator structure
|
||||
* we can find.
|
||||
*/
|
||||
int scoutfs_alloc_foreach(struct super_block *sb,
|
||||
scoutfs_alloc_foreach_cb_t cb, void *arg)
|
||||
void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space)
|
||||
{
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
seq = read_seqbegin(&alloc->seqlock);
|
||||
*avail_total = le32_to_cpu(alloc->avail.first_nr);
|
||||
*freed_space = list_block_space(alloc->freed.first_nr);
|
||||
} while (read_seqretry(&alloc->seqlock, seq));
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the caller's consumption of nr from either avail or
|
||||
* freed would end up exceeding their budget relative to the starting
|
||||
* remaining snapshot they took.
|
||||
*/
|
||||
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
|
||||
u32 budget, u32 nr)
|
||||
{
|
||||
u32 avail_use;
|
||||
u32 freed_use;
|
||||
u32 avail;
|
||||
u32 freed;
|
||||
|
||||
scoutfs_alloc_meta_remaining(alloc, &avail, &freed);
|
||||
|
||||
avail_use = avail_start - avail;
|
||||
freed_use = freed_start - freed;
|
||||
|
||||
return ((avail_use + nr) > budget) || ((freed_use + nr) > budget);
|
||||
}
|
||||
|
||||
bool scoutfs_alloc_test_flag(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc, u32 flag)
|
||||
{
|
||||
unsigned int seq;
|
||||
bool set;
|
||||
|
||||
do {
|
||||
seq = read_seqbegin(&alloc->seqlock);
|
||||
set = !!(le32_to_cpu(alloc->avail.flags) & flag);
|
||||
} while (read_seqretry(&alloc->seqlock, seq));
|
||||
|
||||
return set;
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterate over the allocator structures referenced by the caller's
|
||||
* super and call the caller's callback with summaries of the blocks
|
||||
* found in each structure.
|
||||
*
|
||||
* The caller's responsible for the stability of the referenced blocks.
|
||||
* If the blocks could be stale the caller must deal with retrying when
|
||||
* it sees ESTALE.
|
||||
*/
|
||||
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
|
||||
scoutfs_alloc_foreach_cb_t cb, void *arg)
|
||||
{
|
||||
struct scoutfs_block_ref stale_refs[2] = {{0,}};
|
||||
struct scoutfs_block_ref refs[2] = {{0,}};
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct scoutfs_srch_compact *sc;
|
||||
struct scoutfs_log_merge_request *lmreq;
|
||||
struct scoutfs_log_merge_complete *lmcomp;
|
||||
struct scoutfs_log_trees lt;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
int expected;
|
||||
u64 avail_tot;
|
||||
u64 freed_tot;
|
||||
u64 id;
|
||||
int ret;
|
||||
|
||||
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
|
||||
sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
|
||||
if (!super || !sc) {
|
||||
if (!sc) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
retry:
|
||||
ret = scoutfs_read_super(sb, super);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
refs[0] = super->logs_root.ref;
|
||||
refs[1] = super->srch_root.ref;
|
||||
|
||||
/* all the server allocators */
|
||||
ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_SERVER, 0, true, true,
|
||||
le64_to_cpu(super->meta_alloc[0].total_len)) ?:
|
||||
@@ -1211,19 +1520,152 @@ retry:
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&stale_refs, &refs, sizeof(refs)) == 0) {
|
||||
ret = -EIO;
|
||||
} else {
|
||||
BUILD_BUG_ON(sizeof(stale_refs) != sizeof(refs));
|
||||
memcpy(stale_refs, refs, sizeof(stale_refs));
|
||||
goto retry;
|
||||
/* log merge allocators */
|
||||
memset(&key, 0, sizeof(key));
|
||||
key.sk_zone = SCOUTFS_LOG_MERGE_REQUEST_ZONE;
|
||||
expected = sizeof(*lmreq);
|
||||
id = 0;
|
||||
avail_tot = 0;
|
||||
freed_tot = 0;
|
||||
|
||||
for (;;) {
|
||||
ret = scoutfs_btree_next(sb, &super->log_merge, &key, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.key->sk_zone != key.sk_zone) {
|
||||
ret = -ENOENT;
|
||||
} else if (iref.val_len == expected) {
|
||||
key = *iref.key;
|
||||
if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
|
||||
lmreq = iref.val;
|
||||
id = le64_to_cpu(lmreq->rid);
|
||||
avail_tot = le64_to_cpu(lmreq->meta_avail.total_nr);
|
||||
freed_tot = le64_to_cpu(lmreq->meta_freed.total_nr);
|
||||
} else {
|
||||
lmcomp = iref.val;
|
||||
id = le64_to_cpu(lmcomp->rid);
|
||||
avail_tot = le64_to_cpu(lmcomp->meta_avail.total_nr);
|
||||
freed_tot = le64_to_cpu(lmcomp->meta_freed.total_nr);
|
||||
}
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
if (ret == -ENOENT) {
|
||||
if (key.sk_zone == SCOUTFS_LOG_MERGE_REQUEST_ZONE) {
|
||||
memset(&key, 0, sizeof(key));
|
||||
key.sk_zone = SCOUTFS_LOG_MERGE_COMPLETE_ZONE;
|
||||
expected = sizeof(*lmcomp);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, true, avail_tot) ?:
|
||||
cb(sb, arg, SCOUTFS_ALLOC_OWNER_LOG_MERGE, id, true, false, freed_tot);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
kfree(super);
|
||||
ret = 0;
|
||||
out:
|
||||
|
||||
kfree(sc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the current on-disk super and use it to walk the allocators and
|
||||
* call the caller's callback. This assumes that the super it's reading
|
||||
* could be stale and will retry if it encounters stale blocks.
|
||||
*/
|
||||
int scoutfs_alloc_foreach(struct super_block *sb, scoutfs_alloc_foreach_cb_t cb, void *arg)
|
||||
{
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
DECLARE_SAVED_REFS(saved);
|
||||
int ret;
|
||||
|
||||
super = kmalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
|
||||
if (!super) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
do {
|
||||
ret = scoutfs_read_super(sb, super);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_alloc_foreach_super(sb, super, cb, arg);
|
||||
|
||||
ret = scoutfs_block_check_stale(sb, ret, &saved, &super->logs_root.ref,
|
||||
&super->srch_root.ref);
|
||||
} while (ret == -ESTALE);
|
||||
|
||||
out:
|
||||
kfree(super);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct foreach_cb_args {
|
||||
scoutfs_alloc_extent_cb_t cb;
|
||||
void *cb_arg;
|
||||
};
|
||||
|
||||
static int alloc_btree_extent_item_cb(struct super_block *sb, struct scoutfs_key *key, u64 seq,
|
||||
u8 flags, void *val, int val_len, void *arg)
|
||||
{
|
||||
struct foreach_cb_args *cba = arg;
|
||||
struct scoutfs_extent ext;
|
||||
|
||||
if (key->sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE)
|
||||
return -ENOENT;
|
||||
|
||||
ext_from_key(&ext, key);
|
||||
cba->cb(sb, cba->cb_arg, &ext);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call the caller's callback on each extent stored in the allocator's
|
||||
* btree. The callback sees the extents in order of starting blkno.
|
||||
*/
|
||||
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
|
||||
scoutfs_alloc_extent_cb_t cb, void *cb_arg)
|
||||
{
|
||||
struct foreach_cb_args cba = {
|
||||
.cb = cb,
|
||||
.cb_arg = cb_arg,
|
||||
};
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
init_ext_key(&key, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);
|
||||
|
||||
for (;;) {
|
||||
/* will stop at order items before getting stuck in final block */
|
||||
BUILD_BUG_ON(SCOUTFS_FREE_EXTENT_BLKNO_ZONE > SCOUTFS_FREE_EXTENT_ORDER_ZONE);
|
||||
init_ext_key(&start, SCOUTFS_FREE_EXTENT_BLKNO_ZONE, 0, 1);
|
||||
init_ext_key(&end, SCOUTFS_FREE_EXTENT_ORDER_ZONE, 0, 1);
|
||||
|
||||
ret = scoutfs_btree_read_items(sb, &root->root, &key, &start, &end,
|
||||
alloc_btree_extent_item_cb, &cba);
|
||||
if (ret < 0 || end.sk_zone != SCOUTFS_FREE_EXTENT_BLKNO_ZONE) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
key = end;
|
||||
scoutfs_key_inc(&key);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -19,14 +19,11 @@
|
||||
(128ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
/*
|
||||
* The largest aligned region that we'll try to allocate at the end of
|
||||
* the file as it's extended. This is also limited to the current file
|
||||
* size so we can only waste at most twice the total file size when
|
||||
* files are less than this. We try to keep this around the point of
|
||||
* diminishing returns in streaming performance of common data devices
|
||||
* to limit waste.
|
||||
* The default size that we'll try to preallocate. This is trying to
|
||||
* hit the limit of large efficient device writes while minimizing
|
||||
* wasted preallocation that is never used.
|
||||
*/
|
||||
#define SCOUTFS_DATA_EXTEND_PREALLOC_LIMIT \
|
||||
#define SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS \
|
||||
(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
/*
|
||||
@@ -38,6 +35,10 @@
|
||||
#define SCOUTFS_ALLOC_DATA_LG_THRESH \
|
||||
(8ULL * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
/* the client will force commits if data allocators get too low */
|
||||
#define SCOUTFS_ALLOC_DATA_REFILL_THRESH \
|
||||
((256ULL * 1024 * 1024) >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
/*
|
||||
* Fill client alloc roots to the target when they fall below the lo
|
||||
* threshold.
|
||||
@@ -55,15 +56,16 @@
|
||||
#define SCOUTFS_SERVER_DATA_FILL_LO \
|
||||
(1ULL * 1024 * 1024 * 1024 >> SCOUTFS_BLOCK_SM_SHIFT)
|
||||
|
||||
|
||||
/*
|
||||
* Each of the server meta_alloc roots will try to keep a minimum amount
|
||||
* of free blocks. The server will swap roots when its current avail
|
||||
* falls below the threshold while the freed root is still above it. It
|
||||
* must have room for all the largest allocation attempted in a
|
||||
* transaction on the server.
|
||||
* Log merge meta allocations are only used for one request and will
|
||||
* never use more than the dirty limit.
|
||||
*/
|
||||
#define SCOUTFS_SERVER_META_ALLOC_MIN \
|
||||
(SCOUTFS_SERVER_META_FILL_TARGET * 2)
|
||||
#define SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT (64ULL * 1024 * 1024)
|
||||
/* a few extra blocks for alloc blocks */
|
||||
#define SCOUTFS_SERVER_MERGE_FILL_TARGET \
|
||||
((SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT >> SCOUTFS_BLOCK_LG_SHIFT) + 4)
|
||||
#define SCOUTFS_SERVER_MERGE_FILL_LO SCOUTFS_SERVER_MERGE_FILL_TARGET
|
||||
|
||||
/*
|
||||
* A run-time use of a pair of persistent avail/freed roots as a
|
||||
@@ -125,7 +127,14 @@ int scoutfs_free_data(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
int scoutfs_alloc_move(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_alloc_root *dst,
|
||||
struct scoutfs_alloc_root *src, u64 total);
|
||||
struct scoutfs_alloc_root *src, u64 total,
|
||||
__le64 *exclusive, __le64 *vacant, u64 zone_blocks, u64 meta_budget);
|
||||
int scoutfs_alloc_insert(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
|
||||
u64 start, u64 len);
|
||||
int scoutfs_alloc_remove(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, struct scoutfs_alloc_root *root,
|
||||
u64 start, u64 len);
|
||||
|
||||
int scoutfs_alloc_fill_list(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
@@ -146,11 +155,23 @@ int scoutfs_alloc_splice_list(struct super_block *sb,
|
||||
|
||||
bool scoutfs_alloc_meta_low(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc, u32 nr);
|
||||
void scoutfs_alloc_meta_remaining(struct scoutfs_alloc *alloc, u32 *avail_total, u32 *freed_space);
|
||||
bool scoutfs_alloc_meta_low_since(struct scoutfs_alloc *alloc, u32 avail_start, u32 freed_start,
|
||||
u32 budget, u32 nr);
|
||||
bool scoutfs_alloc_test_flag(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc, u32 flag);
|
||||
|
||||
typedef int (*scoutfs_alloc_foreach_cb_t)(struct super_block *sb, void *arg,
|
||||
int owner, u64 id,
|
||||
bool meta, bool avail, u64 blocks);
|
||||
int scoutfs_alloc_foreach(struct super_block *sb,
|
||||
scoutfs_alloc_foreach_cb_t cb, void *arg);
|
||||
int scoutfs_alloc_foreach_super(struct super_block *sb, struct scoutfs_super_block *super,
|
||||
scoutfs_alloc_foreach_cb_t cb, void *arg);
|
||||
|
||||
typedef void (*scoutfs_alloc_extent_cb_t)(struct super_block *sb, void *cb_arg,
|
||||
struct scoutfs_extent *ext);
|
||||
int scoutfs_alloc_extents_cb(struct super_block *sb, struct scoutfs_alloc_root *root,
|
||||
scoutfs_alloc_extent_cb_t cb, void *cb_arg);
|
||||
|
||||
#endif
|
||||
|
||||
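The foreach interfaces above invoke one callback per allocator structure with its block counts. A minimal sketch of such a callback, assuming only the scoutfs_alloc_foreach_cb_t signature shown above (the example_* names are placeholders):

struct example_totals {
	u64 meta_avail;
	u64 data_avail;
};

/* tallies available blocks split by metadata vs data allocators */
static int example_alloc_cb(struct super_block *sb, void *arg, int owner, u64 id,
			    bool meta, bool avail, u64 blocks)
{
	struct example_totals *tot = arg;

	if (avail) {
		if (meta)
			tot->meta_avail += blocks;
		else
			tot->data_avail += blocks;
	}

	return 0;
}

/* e.g.: ret = scoutfs_alloc_foreach(sb, example_alloc_cb, &totals); */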
252
kmod/src/attr_x.c
Normal file
@@ -0,0 +1,252 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
#include "inode.h"
|
||||
#include "ioctl.h"
|
||||
#include "lock.h"
|
||||
#include "trans.h"
|
||||
#include "attr_x.h"
|
||||
|
||||
static int validate_attr_x_input(struct super_block *sb, struct scoutfs_ioctl_inode_attr_x *iax)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((iax->x_mask & SCOUTFS_IOC_IAX__UNKNOWN) ||
|
||||
(iax->x_flags & SCOUTFS_IOC_IAX_F__UNKNOWN))
|
||||
return -EINVAL;
|
||||
|
||||
if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
|
||||
(ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_RETENTION)))
|
||||
return ret;
|
||||
|
||||
if ((iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID) &&
|
||||
(ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID)))
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the mask indicates interest in the given attr then set the field
|
||||
* to the caller's value and return the new size if it didn't already
|
||||
* include the attr field.
|
||||
*/
|
||||
#define fill_attr(size, iax, bit, field, val) \
|
||||
({ \
|
||||
__typeof__(iax) _iax = (iax); \
|
||||
__typeof__(size) _size = (size); \
|
||||
\
|
||||
if (_iax->x_mask & (bit)) { \
|
||||
_iax->field = (val); \
|
||||
_size = max(_size, offsetof(struct scoutfs_ioctl_inode_attr_x, field) + \
|
||||
sizeof_field(struct scoutfs_ioctl_inode_attr_x, field)); \
|
||||
} \
|
||||
\
|
||||
_size; \
|
||||
})
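/*
 * Example of the size tracking above (added for illustration): if x_mask
 * only has SCOUTFS_IOC_IAX_DATA_VERSION set then size ends up as
 * offsetof(struct scoutfs_ioctl_inode_attr_x, data_version) +
 * sizeof(iax->data_version), i.e. just enough of the struct for the
 * caller to copy back the requested field.
 */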
|
||||
|
||||
/*
|
||||
* Returns -errno on error, or the number of bytes (>= 0) filled by the
* response. 0 can be returned if no attributes are requested in the
|
||||
* input x_mask.
|
||||
*/
|
||||
int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
size_t size = 0;
|
||||
u64 offline;
|
||||
u64 online;
|
||||
u64 bits;
|
||||
int ret;
|
||||
|
||||
if (iax->x_mask == 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = validate_attr_x_input(sb, iax);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
inode_lock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_META_SEQ,
|
||||
meta_seq, scoutfs_inode_meta_seq(inode));
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_SEQ,
|
||||
data_seq, scoutfs_inode_data_seq(inode));
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_DATA_VERSION,
|
||||
data_version, scoutfs_inode_data_version(inode));
|
||||
if (iax->x_mask & (SCOUTFS_IOC_IAX_ONLINE_BLOCKS | SCOUTFS_IOC_IAX_OFFLINE_BLOCKS)) {
|
||||
scoutfs_inode_get_onoff(inode, &online, &offline);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_ONLINE_BLOCKS,
|
||||
online_blocks, online);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_OFFLINE_BLOCKS,
|
||||
offline_blocks, offline);
|
||||
}
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_sec, inode->i_ctime.tv_sec);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CTIME, ctime_nsec, inode->i_ctime.tv_nsec);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_sec, si->crtime.tv_sec);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_CRTIME, crtime_nsec, si->crtime.tv_nsec);
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_SIZE, size, i_size_read(inode));
|
||||
if (iax->x_mask & SCOUTFS_IOC_IAX__BITS) {
|
||||
bits = 0;
|
||||
if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
|
||||
(scoutfs_inode_get_flags(inode) & SCOUTFS_INO_FLAG_RETENTION))
|
||||
bits |= SCOUTFS_IOC_IAX_B_RETENTION;
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX__BITS, bits, bits);
|
||||
}
|
||||
size = fill_attr(size, iax, SCOUTFS_IOC_IAX_PROJECT_ID,
|
||||
project_id, scoutfs_inode_get_proj(inode));
|
||||
|
||||
ret = size;
|
||||
unlock:
|
||||
scoutfs_unlock(sb, lock, SCOUTFS_LOCK_READ);
|
||||
inode_unlock(inode);
|
||||
out:
|
||||
return ret;
|
||||
}
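The reply above is variable-size: size ends up as the largest offsetof() + sizeof_field() of any filled field, so the caller only gets back the prefix of scoutfs_ioctl_inode_attr_x that covers what it asked for. A small caller-side sketch of testing whether a field was filled (illustrative only; the helper below is made up, not part of the patch):

static inline bool iax_field_filled(int filled, size_t field_end)
{
        /* the field is valid if the reply covers its final byte */
        return filled >= 0 && (size_t)filled >= field_end;
}

/*
 * e.g. after ret = scoutfs_get_attr_x(inode, iax):
 *   if (iax_field_filled(ret, offsetofend(struct scoutfs_ioctl_inode_attr_x, data_seq)))
 *           use(iax->data_seq);
 */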

static bool valid_attr_changes(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
        /* provided data_version must be non-zero */
        if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version == 0))
                return false;

        /* can only set size or data version in new regular files */
        if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) ||
             (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)) &&
            (!S_ISREG(inode->i_mode) || scoutfs_inode_data_version(inode) != 0))
                return false;

        /* must provide non-zero data_version with non-zero size */
        if (((iax->x_mask & SCOUTFS_IOC_IAX_SIZE) && (iax->size > 0)) &&
            (!(iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) || (iax->data_version == 0)))
                return false;

        /* must provide non-zero size when setting offline extents to that size */
        if ((iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) &&
            (!(iax->x_mask & SCOUTFS_IOC_IAX_SIZE) || (iax->size == 0)))
                return false;

        /* the retention bit only applies to regular files */
        if ((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) && !S_ISREG(inode->i_mode))
                return false;

        return true;
}

int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax)
{
        struct super_block *sb = inode->i_sb;
        struct scoutfs_inode_info *si = SCOUTFS_I(inode);
        struct scoutfs_lock *lock = NULL;
        LIST_HEAD(ind_locks);
        bool set_data_seq;
        int ret;

        /* initially all setting is root only, could loosen with finer grained checks */
        if (!capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto out;
        }

        if (iax->x_mask == 0) {
                ret = 0;
                goto out;
        }

        ret = validate_attr_x_input(sb, iax);
        if (ret < 0)
                goto out;

        inode_lock(inode);

        ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE, SCOUTFS_LKF_REFRESH_INODE, inode, &lock);
        if (ret)
                goto unlock;

        /* check for errors before making any changes */
        if (!valid_attr_changes(inode, iax)) {
                ret = -EINVAL;
                goto unlock;
        }

        /* retention prevents modification unless also clearing retention */
        ret = scoutfs_inode_check_retention(inode);
        if (ret < 0 && !((iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) &&
                         !(iax->bits & SCOUTFS_IOC_IAX_B_RETENTION)))
                goto unlock;

        /* setting only so we don't see 0 data seq with nonzero data_version */
        if ((iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION) && (iax->data_version > 0))
                set_data_seq = true;
        else
                set_data_seq = false;

        ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq, true);
        if (ret)
                goto unlock;

        ret = scoutfs_dirty_inode_item(inode, lock);
        if (ret < 0)
                goto release;

        /* creating offline extent first, it might fail */
        if (iax->x_flags & SCOUTFS_IOC_IAX_F_SIZE_OFFLINE) {
                ret = scoutfs_data_init_offline_extent(inode, iax->size, lock);
                if (ret)
                        goto release;
        }

        /* make all changes once they're all checked and will succeed */
        if (iax->x_mask & SCOUTFS_IOC_IAX_DATA_VERSION)
                scoutfs_inode_set_data_version(inode, iax->data_version);
        if (iax->x_mask & SCOUTFS_IOC_IAX_SIZE)
                i_size_write(inode, iax->size);
        if (iax->x_mask & SCOUTFS_IOC_IAX_CTIME) {
                inode->i_ctime.tv_sec = iax->ctime_sec;
                inode->i_ctime.tv_nsec = iax->ctime_nsec;
        }
        if (iax->x_mask & SCOUTFS_IOC_IAX_CRTIME) {
                si->crtime.tv_sec = iax->crtime_sec;
                si->crtime.tv_nsec = iax->crtime_nsec;
        }
        if (iax->x_mask & SCOUTFS_IOC_IAX_RETENTION) {
                scoutfs_inode_set_flags(inode, ~SCOUTFS_INO_FLAG_RETENTION,
                                        (iax->bits & SCOUTFS_IOC_IAX_B_RETENTION) ?
                                        SCOUTFS_INO_FLAG_RETENTION : 0);
        }
        if (iax->x_mask & SCOUTFS_IOC_IAX_PROJECT_ID)
                scoutfs_inode_set_proj(inode, iax->project_id);

        scoutfs_update_inode_item(inode, lock, &ind_locks);
        ret = 0;
release:
        scoutfs_release_trans(sb);
unlock:
        scoutfs_inode_index_unlock(sb, &ind_locks);
        scoutfs_unlock(sb, lock, SCOUTFS_LOCK_WRITE);
        inode_unlock(inode);
out:
        return ret;
}

11 kmod/src/attr_x.h Normal file
@@ -0,0 +1,11 @@
#ifndef _SCOUTFS_ATTR_X_H_
#define _SCOUTFS_ATTR_X_H_

#include <linux/kernel.h>
#include <linux/fs.h>
#include "ioctl.h"

int scoutfs_get_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);
int scoutfs_set_attr_x(struct inode *inode, struct scoutfs_ioctl_inode_attr_x *iax);

#endif

205 kmod/src/block.c
@@ -21,6 +21,7 @@
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched/mm.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "super.h"
|
||||
@@ -30,6 +31,7 @@
|
||||
#include "scoutfs_trace.h"
|
||||
#include "alloc.h"
|
||||
#include "triggers.h"
|
||||
#include "util.h"
|
||||
|
||||
/*
|
||||
* The scoutfs block cache manages metadata blocks that can be larger
|
||||
@@ -57,7 +59,7 @@ struct block_info {
|
||||
atomic64_t access_counter;
|
||||
struct rhashtable ht;
|
||||
wait_queue_head_t waitq;
|
||||
struct shrinker shrinker;
|
||||
KC_DEFINE_SHRINKER(shrinker);
|
||||
struct work_struct free_work;
|
||||
struct llist_head free_llist;
|
||||
};
|
||||
@@ -118,8 +120,7 @@ do { \
|
||||
|
||||
static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
|
||||
{
|
||||
int off = offsetof(struct scoutfs_block_header, crc) +
|
||||
FIELD_SIZEOF(struct scoutfs_block_header, crc);
|
||||
int off = offsetofend(struct scoutfs_block_header, crc);
|
||||
u32 calc = crc32c(~0, (char *)hdr + off, size - off);
|
||||
|
||||
return cpu_to_le32(calc);
|
||||
@@ -128,6 +129,7 @@ static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
|
||||
static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
|
||||
{
|
||||
struct block_private *bp;
|
||||
unsigned int nofs_flags;
|
||||
|
||||
/*
|
||||
* If we had multiple blocks per page we'd need to be a little
|
||||
@@ -147,8 +149,19 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
|
||||
set_bit(BLOCK_BIT_PAGE_ALLOC, &bp->bits);
|
||||
bp->bl.data = page_address(bp->page);
|
||||
} else {
|
||||
bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE,
|
||||
GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
/*
|
||||
* __vmalloc doesn't pass the gfp flags down to pte
|
||||
* allocs, they're done with user alloc flags.
|
||||
* Unfortunately, some lockdep doesn't know that
|
||||
* PF_NOMEMALLOC prevents __GFP_FS reclaim and generates
|
||||
* spurious reclaim-on dependencies and warnings.
|
||||
*/
|
||||
lockdep_off();
|
||||
nofs_flags = memalloc_nofs_save();
|
||||
bp->virt = kc__vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM);
|
||||
memalloc_nofs_restore(nofs_flags);
|
||||
lockdep_on();
|
||||
|
||||
if (!bp->virt) {
|
||||
kfree(bp);
|
||||
bp = NULL;
|
||||
@@ -188,7 +201,9 @@ static void block_free(struct super_block *sb, struct block_private *bp)
|
||||
else
|
||||
BUG();
|
||||
|
||||
WARN_ON_ONCE(!list_empty(&bp->dirty_entry));
|
||||
/* ok to tear down dirty blocks when forcing unmount */
|
||||
WARN_ON_ONCE(!scoutfs_forcing_unmount(sb) && !list_empty(&bp->dirty_entry));
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount));
|
||||
WARN_ON_ONCE(atomic_read(&bp->io_count));
|
||||
kfree(bp);
|
||||
@@ -286,10 +301,16 @@ static int block_insert(struct super_block *sb, struct block_private *bp)
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount) & BLOCK_REF_INSERTED);
|
||||
|
||||
retry:
|
||||
atomic_add(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
ret = rhashtable_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
ret = rhashtable_lookup_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
|
||||
if (ret < 0) {
|
||||
atomic_sub(BLOCK_REF_INSERTED, &bp->refcount);
|
||||
if (ret == -EBUSY) {
|
||||
/* wait for pending rebalance to finish */
|
||||
synchronize_rcu();
|
||||
goto retry;
|
||||
}
|
||||
} else {
|
||||
atomic_inc(&binf->total_inserted);
|
||||
TRACE_BLOCK(insert, bp);
|
||||
@@ -396,6 +417,7 @@ static void block_remove_all(struct super_block *sb)
|
||||
|
||||
if (block_get_if_inserted(bp)) {
|
||||
block_remove(sb, bp);
|
||||
WARN_ON_ONCE(atomic_read(&bp->refcount) != 1);
|
||||
block_put(sb, bp);
|
||||
}
|
||||
}
|
||||
@@ -415,11 +437,10 @@ static void block_remove_all(struct super_block *sb)
|
||||
* possible. Final freeing, verifying checksums, and unlinking errored
|
||||
* blocks are all done by future users of the blocks.
|
||||
*/
|
||||
static void block_end_io(struct super_block *sb, int rw,
|
||||
static void block_end_io(struct super_block *sb, blk_opf_t opf,
|
||||
struct block_private *bp, int err)
|
||||
{
|
||||
DECLARE_BLOCK_INFO(sb, binf);
|
||||
bool is_read = !(rw & WRITE);
|
||||
|
||||
if (err) {
|
||||
scoutfs_inc_counter(sb, block_cache_end_io_error);
|
||||
@@ -429,7 +450,7 @@ static void block_end_io(struct super_block *sb, int rw,
|
||||
if (!atomic_dec_and_test(&bp->io_count))
|
||||
return;
|
||||
|
||||
if (is_read && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
if (!op_is_write(opf) && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
set_bit(BLOCK_BIT_UPTODATE, &bp->bits);
|
||||
|
||||
clear_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
|
||||
@@ -442,13 +463,13 @@ static void block_end_io(struct super_block *sb, int rw,
|
||||
wake_up(&binf->waitq);
|
||||
}
|
||||
|
||||
static void block_bio_end_io(struct bio *bio, int err)
|
||||
static void KC_DECLARE_BIO_END_IO(block_bio_end_io, struct bio *bio)
|
||||
{
|
||||
struct block_private *bp = bio->bi_private;
|
||||
struct super_block *sb = bp->sb;
|
||||
|
||||
TRACE_BLOCK(end_io, bp);
|
||||
block_end_io(sb, bio->bi_rw, bp, err);
|
||||
block_end_io(sb, kc_bio_get_opf(bio), bp, kc_bio_get_errno(bio));
|
||||
bio_put(bio);
|
||||
}
|
||||
|
||||
@@ -456,7 +477,7 @@ static void block_bio_end_io(struct bio *bio, int err)
|
||||
* Kick off IO for a single block.
|
||||
*/
|
||||
static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
int rw)
|
||||
blk_opf_t opf)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct bio *bio = NULL;
|
||||
@@ -466,6 +487,9 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
sector_t sector;
|
||||
int ret = 0;
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
|
||||
sector = bp->bl.blkno << (SCOUTFS_BLOCK_LG_SHIFT - 9);
|
||||
|
||||
WARN_ON_ONCE(bp->bl.blkno == U64_MAX);
|
||||
@@ -480,14 +504,13 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
|
||||
for (off = 0; off < SCOUTFS_BLOCK_LG_SIZE; off += PAGE_SIZE) {
|
||||
if (!bio) {
|
||||
bio = bio_alloc(GFP_NOFS, SCOUTFS_BLOCK_LG_PAGES_PER);
|
||||
bio = kc_bio_alloc(sbi->meta_bdev, SCOUTFS_BLOCK_LG_PAGES_PER, opf, GFP_NOFS);
|
||||
if (!bio) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
|
||||
bio->bi_sector = sector + (off >> 9);
|
||||
bio->bi_bdev = sbi->meta_bdev;
|
||||
kc_bio_set_sector(bio, sector + (off >> 9));
|
||||
bio->bi_end_io = block_bio_end_io;
|
||||
bio->bi_private = bp;
|
||||
|
||||
@@ -504,18 +527,18 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
|
||||
BUG();
|
||||
|
||||
if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
|
||||
submit_bio(rw, bio);
|
||||
kc_submit_bio(bio);
|
||||
bio = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (bio)
|
||||
submit_bio(rw, bio);
|
||||
kc_submit_bio(bio);
|
||||
|
||||
blk_finish_plug(&plug);
|
||||
|
||||
/* let racing end_io know we're done */
|
||||
block_end_io(sb, rw, bp, ret);
|
||||
block_end_io(sb, opf, bp, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -616,14 +639,16 @@ static struct block_private *block_read(struct super_block *sb, u64 blkno)
|
||||
|
||||
if (!test_bit(BLOCK_BIT_UPTODATE, &bp->bits) &&
|
||||
test_and_clear_bit(BLOCK_BIT_NEW, &bp->bits)) {
|
||||
ret = block_submit_bio(sb, bp, READ);
|
||||
ret = block_submit_bio(sb, bp, REQ_OP_READ);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = wait_event_interruptible(binf->waitq, uptodate_or_error(bp));
|
||||
if (ret == 0 && test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
wait_event(binf->waitq, uptodate_or_error(bp));
|
||||
if (test_bit(BLOCK_BIT_ERROR, &bp->bits))
|
||||
ret = -EIO;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
if (ret < 0) {
|
||||
@@ -651,10 +676,11 @@ out:
|
||||
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
|
||||
struct scoutfs_block **bl_ret)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_block_header *hdr;
|
||||
struct block_private *bp = NULL;
|
||||
bool retried = false;
|
||||
__le32 crc = 0;
|
||||
int ret;
|
||||
|
||||
retry:
|
||||
@@ -667,7 +693,9 @@ retry:
|
||||
|
||||
/* corrupted writes might be a sign of a stale reference */
|
||||
if (!test_bit(BLOCK_BIT_CRC_VALID, &bp->bits)) {
|
||||
if (hdr->crc != block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE)) {
|
||||
crc = block_calc_crc(hdr, SCOUTFS_BLOCK_LG_SIZE);
|
||||
if (hdr->crc != crc) {
|
||||
trace_scoutfs_block_stale(sb, ref, hdr, magic, le32_to_cpu(crc));
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
@@ -675,8 +703,9 @@ retry:
|
||||
set_bit(BLOCK_BIT_CRC_VALID, &bp->bits);
|
||||
}
|
||||
|
||||
if (hdr->magic != cpu_to_le32(magic) || hdr->fsid != super->hdr.fsid ||
|
||||
if (hdr->magic != cpu_to_le32(magic) || hdr->fsid != cpu_to_le64(sbi->fsid) ||
|
||||
hdr->seq != ref->seq || hdr->blkno != ref->blkno) {
|
||||
trace_scoutfs_block_stale(sb, ref, hdr, magic, 0);
|
||||
ret = -ESTALE;
|
||||
goto out;
|
||||
}
|
||||
@@ -702,6 +731,36 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool stale_refs_match(struct scoutfs_block_ref *caller, struct scoutfs_block_ref *saved)
|
||||
{
|
||||
return !caller || (caller->blkno == saved->blkno && caller->seq == saved->seq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if a read of a reference that gave ESTALE should be retried or
|
||||
* should generate a hard error. If this is the second time we got
|
||||
* ESTALE from the same refs then we return EIO and the caller should
|
||||
* stop. As long as we keep seeing different refs we'll return ESTALE
|
||||
* and the caller can keep trying.
|
||||
*/
|
||||
int scoutfs_block_check_stale(struct super_block *sb, int ret,
|
||||
struct scoutfs_block_saved_refs *saved,
|
||||
struct scoutfs_block_ref *a, struct scoutfs_block_ref *b)
|
||||
{
|
||||
if (ret == -ESTALE) {
|
||||
if (stale_refs_match(a, &saved->refs[0]) && stale_refs_match(b, &saved->refs[1])){
|
||||
ret = -EIO;
|
||||
} else {
|
||||
if (a)
|
||||
saved->refs[0] = *a;
|
||||
if (b)
|
||||
saved->refs[1] = *b;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
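The saved-refs helper above is meant to wrap the read retry loop: callers keep retrying -ESTALE while the refs they read keep changing, and turn it into -EIO once the same refs go stale twice. A rough sketch of that loop (illustrative only, not part of the patch; a real caller re-reads the ref from its parent block each pass):

static int read_ref_with_stale_retry(struct super_block *sb, struct scoutfs_block_ref *ref,
                                     u32 magic, struct scoutfs_block **bl_ret)
{
        DECLARE_SAVED_REFS(saved);
        int ret;

        do {
                /* a real caller would refresh *ref from its parent here */
                ret = scoutfs_block_read_ref(sb, ref, magic, bl_ret);
                ret = scoutfs_block_check_stale(sb, ret, &saved, ref, NULL);
        } while (ret == -ESTALE);

        return ret;
}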
|
||||
|
||||
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl)
|
||||
{
|
||||
if (!IS_ERR_OR_NULL(bl))
|
||||
@@ -771,7 +830,7 @@ int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
u32 magic, struct scoutfs_block **bl_ret,
|
||||
u64 dirty_blkno, u64 *ref_blkno)
|
||||
{
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_block *cow_bl = NULL;
|
||||
struct scoutfs_block *bl = NULL;
|
||||
struct block_private *exist_bp = NULL;
|
||||
@@ -839,7 +898,7 @@ int scoutfs_block_dirty_ref(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
|
||||
hdr = bl->data;
|
||||
hdr->magic = cpu_to_le32(magic);
|
||||
hdr->fsid = super->hdr.fsid;
|
||||
hdr->fsid = cpu_to_le64(sbi->fsid);
|
||||
hdr->blkno = cpu_to_le64(bl->blkno);
|
||||
prandom_bytes(&hdr->seq, sizeof(hdr->seq));
|
||||
|
||||
@@ -913,7 +972,7 @@ int scoutfs_block_writer_write(struct super_block *sb,
|
||||
/* retry previous write errors */
|
||||
clear_bit(BLOCK_BIT_ERROR, &bp->bits);
|
||||
|
||||
ret = block_submit_bio(sb, bp, WRITE);
|
||||
ret = block_submit_bio(sb, bp, REQ_OP_WRITE);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -1013,6 +1072,16 @@ u64 scoutfs_block_writer_dirty_bytes(struct super_block *sb,
|
||||
return wri->nr_dirty_blocks * SCOUTFS_BLOCK_LG_SIZE;
|
||||
}
|
||||
|
||||
static unsigned long block_count_objects(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
struct block_info *binf = KC_SHRINKER_CONTAINER_OF(shrink, struct block_info);
|
||||
struct super_block *sb = binf->sb;
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_count_objects);
|
||||
|
||||
return shrinker_min_long(atomic_read(&binf->total_inserted));
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove a number of cached blocks that haven't been used recently.
|
||||
*
|
||||
@@ -1033,25 +1102,19 @@ u64 scoutfs_block_writer_dirty_bytes(struct super_block *sb,
|
||||
* atomically remove blocks when the only references are ours and the
|
||||
* hash table.
|
||||
*/
|
||||
static int block_shrink(struct shrinker *shrink, struct shrink_control *sc)
|
||||
static unsigned long block_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
struct block_info *binf = container_of(shrink, struct block_info,
|
||||
shrinker);
|
||||
struct block_info *binf = KC_SHRINKER_CONTAINER_OF(shrink, struct block_info);
|
||||
struct super_block *sb = binf->sb;
|
||||
struct rhashtable_iter iter;
|
||||
struct block_private *bp;
|
||||
unsigned long nr;
|
||||
bool stop = false;
|
||||
unsigned long freed = 0;
|
||||
unsigned long nr = sc->nr_to_scan;
|
||||
u64 recently;
|
||||
|
||||
nr = sc->nr_to_scan;
|
||||
if (nr == 0)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, block_cache_scan_objects);
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_shrink);
|
||||
|
||||
nr = DIV_ROUND_UP(nr, SCOUTFS_BLOCK_LG_PAGES_PER);
|
||||
|
||||
restart:
|
||||
recently = accessed_recently(binf);
|
||||
rhashtable_walk_enter(&binf->ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
@@ -1073,11 +1136,15 @@ restart:
|
||||
if (bp == NULL)
|
||||
break;
|
||||
if (bp == ERR_PTR(-EAGAIN)) {
|
||||
/* hard reset to not hold rcu grace period across retries */
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_restart);
|
||||
goto restart;
|
||||
/*
|
||||
* We can be called from reclaim in the allocation
|
||||
* to resize the hash table itself. We have to
|
||||
* return so that the caller can proceed and
|
||||
* enable hash table iteration again.
|
||||
*/
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_stop);
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_next);
|
||||
@@ -1091,6 +1158,7 @@ restart:
|
||||
if (block_remove_solo(sb, bp)) {
|
||||
scoutfs_inc_counter(sb, block_cache_shrink_remove);
|
||||
TRACE_BLOCK(shrink, bp);
|
||||
freed++;
|
||||
nr--;
|
||||
}
|
||||
block_put(sb, bp);
|
||||
@@ -1099,9 +1167,11 @@ restart:
|
||||
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
out:
|
||||
return min_t(u64, (u64)atomic_read(&binf->total_inserted) * SCOUTFS_BLOCK_LG_PAGES_PER,
|
||||
INT_MAX);
|
||||
|
||||
if (stop)
|
||||
return SHRINK_STOP;
|
||||
else
|
||||
return freed;
|
||||
}
|
||||
|
||||
struct sm_block_completion {
|
||||
@@ -1109,11 +1179,11 @@ struct sm_block_completion {
|
||||
int err;
|
||||
};
|
||||
|
||||
static void sm_block_bio_end_io(struct bio *bio, int err)
|
||||
static void KC_DECLARE_BIO_END_IO(sm_block_bio_end_io, struct bio *bio)
|
||||
{
|
||||
struct sm_block_completion *sbc = bio->bi_private;
|
||||
|
||||
sbc->err = err;
|
||||
sbc->err = kc_bio_get_errno(bio);
|
||||
complete(&sbc->comp);
|
||||
bio_put(bio);
|
||||
}
|
||||
@@ -1128,9 +1198,8 @@ static void sm_block_bio_end_io(struct bio *bio, int err)
|
||||
* only layer that sees the full block buffer so we pass the calculated
|
||||
* crc to the caller for them to check in their context.
|
||||
*/
|
||||
static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
|
||||
struct scoutfs_block_header *hdr, size_t len,
|
||||
__le32 *blk_crc)
|
||||
static int sm_block_io(struct super_block *sb, struct block_device *bdev, blk_opf_t opf,
|
||||
u64 blkno, struct scoutfs_block_header *hdr, size_t len, __le32 *blk_crc)
|
||||
{
|
||||
struct scoutfs_block_header *pg_hdr;
|
||||
struct sm_block_completion sbc;
|
||||
@@ -1140,8 +1209,11 @@ static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
|
||||
|
||||
BUILD_BUG_ON(PAGE_SIZE < SCOUTFS_BLOCK_SM_SIZE);
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
|
||||
if (WARN_ON_ONCE(len > SCOUTFS_BLOCK_SM_SIZE) ||
|
||||
WARN_ON_ONCE(!(rw & WRITE) && !blk_crc))
|
||||
WARN_ON_ONCE(!op_is_write(opf) && !blk_crc))
|
||||
return -EINVAL;
|
||||
|
||||
page = alloc_page(GFP_NOFS);
|
||||
@@ -1150,7 +1222,7 @@ static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
|
||||
|
||||
pg_hdr = page_address(page);
|
||||
|
||||
if (rw & WRITE) {
|
||||
if (op_is_write(opf)) {
|
||||
memcpy(pg_hdr, hdr, len);
|
||||
if (len < SCOUTFS_BLOCK_SM_SIZE)
|
||||
memset((char *)pg_hdr + len, 0,
|
||||
@@ -1158,14 +1230,13 @@ static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
|
||||
pg_hdr->crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
|
||||
}
|
||||
|
||||
bio = bio_alloc(GFP_NOFS, 1);
|
||||
bio = kc_bio_alloc(bdev, 1, opf, GFP_NOFS);
|
||||
if (!bio) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bio->bi_sector = blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9);
|
||||
bio->bi_bdev = bdev;
|
||||
kc_bio_set_sector(bio, blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9));
|
||||
bio->bi_end_io = sm_block_bio_end_io;
|
||||
bio->bi_private = &sbc;
|
||||
bio_add_page(bio, page, SCOUTFS_BLOCK_SM_SIZE, 0);
|
||||
@@ -1173,12 +1244,12 @@ static int sm_block_io(struct block_device *bdev, int rw, u64 blkno,
|
||||
init_completion(&sbc.comp);
|
||||
sbc.err = 0;
|
||||
|
||||
submit_bio((rw & WRITE) ? WRITE_SYNC : READ_SYNC, bio);
|
||||
kc_submit_bio(bio);
|
||||
|
||||
wait_for_completion(&sbc.comp);
|
||||
ret = sbc.err;
|
||||
|
||||
if (ret == 0 && !(rw & WRITE)) {
|
||||
if (ret == 0 && !op_is_write(opf)) {
|
||||
memcpy(hdr, pg_hdr, len);
|
||||
*blk_crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
|
||||
}
|
||||
@@ -1192,14 +1263,14 @@ int scoutfs_block_read_sm(struct super_block *sb,
|
||||
struct scoutfs_block_header *hdr, size_t len,
|
||||
__le32 *blk_crc)
|
||||
{
|
||||
return sm_block_io(bdev, READ, blkno, hdr, len, blk_crc);
|
||||
return sm_block_io(sb, bdev, REQ_OP_READ, blkno, hdr, len, blk_crc);
|
||||
}
|
||||
|
||||
int scoutfs_block_write_sm(struct super_block *sb,
|
||||
struct block_device *bdev, u64 blkno,
|
||||
struct scoutfs_block_header *hdr, size_t len)
|
||||
{
|
||||
return sm_block_io(bdev, WRITE, blkno, hdr, len, NULL);
|
||||
return sm_block_io(sb, bdev, REQ_OP_WRITE, blkno, hdr, len, NULL);
|
||||
}
|
||||
|
||||
int scoutfs_block_setup(struct super_block *sb)
|
||||
@@ -1224,9 +1295,9 @@ int scoutfs_block_setup(struct super_block *sb)
|
||||
atomic_set(&binf->total_inserted, 0);
|
||||
atomic64_set(&binf->access_counter, 0);
|
||||
init_waitqueue_head(&binf->waitq);
|
||||
binf->shrinker.shrink = block_shrink;
|
||||
binf->shrinker.seeks = DEFAULT_SEEKS;
|
||||
register_shrinker(&binf->shrinker);
|
||||
KC_INIT_SHRINKER_FUNCS(&binf->shrinker, block_count_objects,
|
||||
block_scan_objects);
|
||||
KC_REGISTER_SHRINKER(&binf->shrinker, "scoutfs-block:" SCSBF, SCSB_ARGS(sb));
|
||||
INIT_WORK(&binf->free_work, block_free_work);
|
||||
init_llist_head(&binf->free_llist);
|
||||
|
||||
@@ -1237,7 +1308,7 @@ out:
|
||||
if (ret)
|
||||
scoutfs_block_destroy(sb);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_block_destroy(struct super_block *sb)
|
||||
@@ -1246,7 +1317,7 @@ void scoutfs_block_destroy(struct super_block *sb)
|
||||
struct block_info *binf = SCOUTFS_SB(sb)->block_info;
|
||||
|
||||
if (binf) {
|
||||
unregister_shrinker(&binf->shrinker);
|
||||
KC_UNREGISTER_SHRINKER(&binf->shrinker);
|
||||
block_remove_all(sb);
|
||||
flush_work(&binf->free_work);
|
||||
rhashtable_destroy(&binf->ht);
|
||||
|
||||
@@ -13,6 +13,17 @@ struct scoutfs_block {
|
||||
void *priv;
|
||||
};
|
||||
|
||||
struct scoutfs_block_saved_refs {
|
||||
struct scoutfs_block_ref refs[2];
|
||||
};
|
||||
|
||||
#define DECLARE_SAVED_REFS(name) \
|
||||
struct scoutfs_block_saved_refs name = {{{0,}}}
|
||||
|
||||
int scoutfs_block_check_stale(struct super_block *sb, int ret,
|
||||
struct scoutfs_block_saved_refs *saved,
|
||||
struct scoutfs_block_ref *a, struct scoutfs_block_ref *b);
|
||||
|
||||
int scoutfs_block_read_ref(struct super_block *sb, struct scoutfs_block_ref *ref, u32 magic,
|
||||
struct scoutfs_block **bl_ret);
|
||||
void scoutfs_block_put(struct super_block *sb, struct scoutfs_block *bl);
|
||||
|
||||
1026 kmod/src/btree.c
File diff suppressed because it is too large
@@ -20,13 +20,15 @@ struct scoutfs_btree_item_ref {
|
||||
|
||||
/* caller gives an item to the callback */
|
||||
typedef int (*scoutfs_btree_item_cb)(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *key, u64 seq, u8 flags,
|
||||
void *val, int val_len, void *arg);
|
||||
|
||||
/* simple singly-linked list of items */
|
||||
struct scoutfs_btree_item_list {
|
||||
struct scoutfs_btree_item_list *next;
|
||||
struct scoutfs_key key;
|
||||
u64 seq;
|
||||
u8 flags;
|
||||
int val_len;
|
||||
u8 val[0];
|
||||
};
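Callbacks of the updated scoutfs_btree_item_cb type now also receive each item's seq and flags. A minimal sketch of a conforming callback (illustrative only; the counting struct is made up):

struct item_count {
        u64 nr;
};

static int count_items_cb(struct super_block *sb, struct scoutfs_key *key,
                          u64 seq, u8 flags, void *val, int val_len, void *arg)
{
        struct item_count *ic = arg;

        /* count every item we're handed, regardless of seq or flags */
        ic->nr++;
        return 0;
}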
|
||||
@@ -82,6 +84,49 @@ int scoutfs_btree_insert_list(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_btree_item_list *lst);
|
||||
|
||||
int scoutfs_btree_parent_range(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end);
|
||||
int scoutfs_btree_get_parent(struct super_block *sb,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_btree_root *par_root);
|
||||
int scoutfs_btree_set_parent(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_btree_root *par_root);
|
||||
int scoutfs_btree_rebalance(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_key *key);
|
||||
|
||||
/* merge input is a list of roots */
|
||||
struct scoutfs_btree_root_head {
|
||||
struct list_head head;
|
||||
struct scoutfs_btree_root root;
|
||||
};
|
||||
|
||||
int scoutfs_btree_merge(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
struct scoutfs_key *next_ret,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct list_head *input_list,
|
||||
bool subtree, int dirty_limit, int alloc_low, int merge_window);
|
||||
|
||||
int scoutfs_btree_free_blocks(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_btree_root *root, int free_budget);
|
||||
|
||||
void scoutfs_btree_put_iref(struct scoutfs_btree_item_ref *iref);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <net/sock.h>
|
||||
#include <net/tcp.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/overflow.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
@@ -31,6 +32,8 @@
|
||||
#include "net.h"
|
||||
#include "endian_swap.h"
|
||||
#include "quorum.h"
|
||||
#include "omap.h"
|
||||
#include "trans.h"
|
||||
|
||||
/*
|
||||
* The client is responsible for maintaining a connection to the server.
|
||||
@@ -47,6 +50,7 @@ struct client_info {
|
||||
|
||||
struct workqueue_struct *workq;
|
||||
struct delayed_work connect_dwork;
|
||||
unsigned long connect_delay_jiffies;
|
||||
|
||||
u64 server_term;
|
||||
|
||||
@@ -65,6 +69,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
struct scoutfs_net_inode_alloc ial;
|
||||
__le64 lecount = cpu_to_le64(count);
|
||||
u64 tmp;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_net_sync_request(sb, client->conn,
|
||||
@@ -77,7 +82,7 @@ int scoutfs_client_alloc_inodes(struct super_block *sb, u64 count,
|
||||
|
||||
if (*nr == 0)
|
||||
ret = -ENOSPC;
|
||||
else if (*ino + *nr < *ino)
|
||||
else if (check_add_overflow(*ino, *nr - 1, &tmp))
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
@@ -114,21 +119,6 @@ int scoutfs_client_get_roots(struct super_block *sb,
|
||||
NULL, 0, roots, sizeof(*roots));
|
||||
}
|
||||
|
||||
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
__le64 leseq;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_ADVANCE_SEQ,
|
||||
NULL, 0, &leseq, sizeof(leseq));
|
||||
if (ret == 0)
|
||||
*seq = le64_to_cpu(leseq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
@@ -150,7 +140,7 @@ static int client_lock_response(struct super_block *sb,
|
||||
void *resp, unsigned int resp_len,
|
||||
int error, void *data)
|
||||
{
|
||||
if (resp_len != sizeof(struct scoutfs_net_lock_grant_response))
|
||||
if (resp_len != sizeof(struct scoutfs_net_lock))
|
||||
return -EINVAL;
|
||||
|
||||
/* XXX error? */
|
||||
@@ -215,6 +205,120 @@ int scoutfs_client_srch_commit_compact(struct super_block *sb,
|
||||
res, sizeof(*res), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_get_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_request *req)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_GET_LOG_MERGE,
|
||||
NULL, 0, req, sizeof(*req));
|
||||
}
|
||||
|
||||
int scoutfs_client_commit_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_complete *comp)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
|
||||
comp, sizeof(*comp), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map *map)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_response(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
|
||||
id, 0, map, sizeof(*map));
|
||||
}
|
||||
|
||||
/* The client is receiving an omap request from the server */
|
||||
static int client_open_ino_map(struct super_block *sb, struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id, void *arg, u16 arg_len)
|
||||
{
|
||||
if (arg_len != sizeof(struct scoutfs_open_ino_map_args))
|
||||
return -EINVAL;
|
||||
|
||||
return scoutfs_omap_client_handle_request(sb, id, arg);
|
||||
}
|
||||
|
||||
/* The client is sending an omap request to the server */
|
||||
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
|
||||
struct scoutfs_open_ino_map *map)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
struct scoutfs_open_ino_map_args args = {
|
||||
.group_nr = cpu_to_le64(group_nr),
|
||||
.req_id = 0,
|
||||
};
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_OPEN_INO_MAP,
|
||||
&args, sizeof(args), map, sizeof(*map));
|
||||
}
|
||||
|
||||
/* The client is asking the server for the current volume options */
|
||||
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_GET_VOLOPT,
|
||||
NULL, 0, volopt, sizeof(*volopt));
|
||||
}
|
||||
|
||||
/* The client is asking the server to update volume options */
|
||||
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_SET_VOLOPT,
|
||||
volopt, sizeof(*volopt), NULL, 0);
|
||||
}
|
||||
|
||||
/* The client is asking the server to clear volume options */
|
||||
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_CLEAR_VOLOPT,
|
||||
volopt, sizeof(*volopt), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_RESIZE_DEVICES,
|
||||
nrd, sizeof(*nrd), NULL, 0);
|
||||
}
|
||||
|
||||
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
return scoutfs_net_sync_request(sb, client->conn, SCOUTFS_NET_CMD_STATFS,
|
||||
NULL, 0, nst, sizeof(*nst));
|
||||
}
|
||||
|
||||
/*
|
||||
* The server is asking that we trigger a commit of the current log
|
||||
* trees so that they can ensure an item seq discontinuity between
|
||||
* finalized log btrees and the next set of open log btrees. If we're
|
||||
* shutting down then we're already going to perform a final commit.
|
||||
*/
|
||||
static int sync_log_trees(struct super_block *sb, struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id, void *arg, u16 arg_len)
|
||||
{
|
||||
if (arg_len != 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (!scoutfs_unmounting(sb))
|
||||
scoutfs_trans_sync(sb, 0);
|
||||
|
||||
return scoutfs_net_response(sb, conn, cmd, id, 0, NULL, 0);
|
||||
}
|
||||
|
||||
/* The client is receiving a invalidation request from the server */
|
||||
static int client_lock(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn, u8 cmd, u64 id,
|
||||
@@ -252,8 +356,8 @@ static int client_greeting(struct super_block *sb,
|
||||
void *resp, unsigned int resp_len, int error,
|
||||
void *data)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct client_info *client = sbi->client_info;
|
||||
struct scoutfs_net_greeting *gr = resp;
|
||||
bool new_server;
|
||||
int ret;
|
||||
@@ -268,18 +372,16 @@ static int client_greeting(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gr->fsid != super->hdr.fsid) {
|
||||
scoutfs_warn(sb, "server sent fsid 0x%llx, client has 0x%llx",
|
||||
le64_to_cpu(gr->fsid),
|
||||
le64_to_cpu(super->hdr.fsid));
|
||||
if (gr->fsid != cpu_to_le64(sbi->fsid)) {
|
||||
scoutfs_warn(sb, "server greeting response fsid 0x%llx did not match client fsid 0x%llx",
|
||||
le64_to_cpu(gr->fsid), sbi->fsid);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gr->version != super->version) {
|
||||
scoutfs_warn(sb, "server sent format 0x%llx, client has 0x%llx",
|
||||
le64_to_cpu(gr->version),
|
||||
le64_to_cpu(super->version));
|
||||
if (le64_to_cpu(gr->fmt_vers) != sbi->fmt_vers) {
|
||||
scoutfs_warn(sb, "server greeting response format version %llu did not match client format version %llu",
|
||||
le64_to_cpu(gr->fmt_vers), sbi->fmt_vers);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -288,6 +390,7 @@ static int client_greeting(struct super_block *sb,
|
||||
scoutfs_net_client_greeting(sb, conn, new_server);
|
||||
|
||||
client->server_term = le64_to_cpu(gr->server_term);
|
||||
client->connect_delay_jiffies = 0;
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
@@ -337,6 +440,20 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're not seeing successful connections we want to back off. Each
|
||||
* connection attempt starts by setting a long connection work delay.
|
||||
* We only set a shorter delay if we see a greeting response from the
|
||||
* server. At that point we'll try to immediately reconnect if the
|
||||
* connection is broken.
|
||||
*/
|
||||
static void queue_connect_dwork(struct super_block *sb, struct client_info *client)
|
||||
{
|
||||
if (!atomic_read(&client->shutting_down) && !scoutfs_forcing_unmount(sb))
|
||||
queue_delayed_work(client->workq, &client->connect_dwork,
|
||||
client->connect_delay_jiffies);
|
||||
}
|
||||
|
||||
/*
|
||||
* This work is responsible for maintaining a connection from the client
|
||||
* to the server. It's queued on mount and disconnect and we requeue
|
||||
@@ -360,13 +477,15 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
connect_dwork.work);
|
||||
struct super_block *sb = client->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &sbi->super;
|
||||
struct mount_options *opts = &sbi->opts;
|
||||
const bool am_quorum = opts->quorum_slot_nr >= 0;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct scoutfs_net_greeting greet;
|
||||
struct sockaddr_in sin;
|
||||
bool am_quorum;
|
||||
int ret;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
am_quorum = opts.quorum_slot_nr >= 0;
|
||||
|
||||
/* can unmount once server farewell handling removes our item */
|
||||
if (client->sending_farewell &&
|
||||
lookup_mounted_client_item(sb, sbi->rid) == 0) {
|
||||
@@ -376,6 +495,9 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* always wait a bit until a greeting response sets a lower delay */
|
||||
client->connect_delay_jiffies = msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS);
|
||||
|
||||
ret = scoutfs_quorum_server_sin(sb, &sin);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -386,8 +508,8 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
goto out;
|
||||
|
||||
/* send a greeting to verify endpoints of each connection */
|
||||
greet.fsid = super->hdr.fsid;
|
||||
greet.version = super->version;
|
||||
greet.fsid = cpu_to_le64(sbi->fsid);
|
||||
greet.fmt_vers = cpu_to_le64(sbi->fmt_vers);
|
||||
greet.server_term = cpu_to_le64(client->server_term);
|
||||
greet.rid = cpu_to_le64(sbi->rid);
|
||||
greet.flags = 0;
|
||||
@@ -403,16 +525,15 @@ static void scoutfs_client_connect_worker(struct work_struct *work)
|
||||
if (ret)
|
||||
scoutfs_net_shutdown(sb, client->conn);
|
||||
out:
|
||||
|
||||
/* always have a small delay before retrying to avoid storms */
|
||||
if (ret && !atomic_read(&client->shutting_down))
|
||||
queue_delayed_work(client->workq, &client->connect_dwork,
|
||||
msecs_to_jiffies(CLIENT_CONNECT_DELAY_MS));
|
||||
if (ret)
|
||||
queue_connect_dwork(sb, client);
|
||||
}
|
||||
|
||||
static scoutfs_net_request_t client_req_funcs[] = {
|
||||
[SCOUTFS_NET_CMD_SYNC_LOG_TREES] = sync_log_trees,
|
||||
[SCOUTFS_NET_CMD_LOCK] = client_lock,
|
||||
[SCOUTFS_NET_CMD_LOCK_RECOVER] = client_lock_recover,
|
||||
[SCOUTFS_NET_CMD_OPEN_INO_MAP] = client_open_ino_map,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -425,8 +546,7 @@ static void client_notify_down(struct super_block *sb,
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
if (!atomic_read(&client->shutting_down))
|
||||
queue_delayed_work(client->workq, &client->connect_dwork, 0);
|
||||
queue_connect_dwork(sb, client);
|
||||
}
|
||||
|
||||
int scoutfs_client_setup(struct super_block *sb)
|
||||
@@ -461,7 +581,7 @@ int scoutfs_client_setup(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
queue_delayed_work(client->workq, &client->connect_dwork, 0);
|
||||
queue_connect_dwork(sb, client);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
@@ -518,7 +638,7 @@ void scoutfs_client_destroy(struct super_block *sb)
|
||||
if (client == NULL)
|
||||
return;
|
||||
|
||||
if (client->server_term != 0) {
|
||||
if (client->server_term != 0 && !scoutfs_forcing_unmount(sb)) {
|
||||
client->sending_farewell = true;
|
||||
ret = scoutfs_net_submit_request(sb, client->conn,
|
||||
SCOUTFS_NET_CMD_FAREWELL,
|
||||
@@ -526,10 +646,8 @@ void scoutfs_client_destroy(struct super_block *sb)
|
||||
client_farewell_response,
|
||||
NULL, NULL);
|
||||
if (ret == 0) {
|
||||
ret = wait_for_completion_interruptible(
|
||||
&client->farewell_comp);
|
||||
if (ret == 0)
|
||||
ret = client->farewell_error;
|
||||
wait_for_completion(&client->farewell_comp);
|
||||
ret = client->farewell_error;
|
||||
}
|
||||
if (ret) {
|
||||
scoutfs_inc_counter(sb, client_farewell_error);
|
||||
@@ -553,3 +671,11 @@ void scoutfs_client_destroy(struct super_block *sb)
|
||||
kfree(client);
|
||||
sbi->client_info = NULL;
|
||||
}
|
||||
|
||||
void scoutfs_client_net_shutdown(struct super_block *sb)
|
||||
{
|
||||
struct client_info *client = SCOUTFS_SB(sb)->client_info;
|
||||
|
||||
if (client && client->conn)
|
||||
scoutfs_net_shutdown(sb, client->conn);
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ int scoutfs_client_commit_log_trees(struct super_block *sb,
|
||||
int scoutfs_client_get_roots(struct super_block *sb,
|
||||
struct scoutfs_net_roots *roots);
|
||||
u64 *scoutfs_client_bulk_alloc(struct super_block *sb);
|
||||
int scoutfs_client_advance_seq(struct super_block *sb, u64 *seq);
|
||||
int scoutfs_client_get_last_seq(struct super_block *sb, u64 *seq);
|
||||
int scoutfs_client_lock_request(struct super_block *sb,
|
||||
struct scoutfs_net_lock *nl);
|
||||
@@ -22,7 +21,21 @@ int scoutfs_client_srch_get_compact(struct super_block *sb,
|
||||
struct scoutfs_srch_compact *sc);
|
||||
int scoutfs_client_srch_commit_compact(struct super_block *sb,
|
||||
struct scoutfs_srch_compact *res);
|
||||
int scoutfs_client_get_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_request *req);
|
||||
int scoutfs_client_commit_log_merge(struct super_block *sb,
|
||||
struct scoutfs_log_merge_complete *comp);
|
||||
int scoutfs_client_send_omap_response(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map *map);
|
||||
int scoutfs_client_open_ino_map(struct super_block *sb, u64 group_nr,
|
||||
struct scoutfs_open_ino_map *map);
|
||||
int scoutfs_client_get_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
|
||||
int scoutfs_client_set_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
|
||||
int scoutfs_client_clear_volopt(struct super_block *sb, struct scoutfs_volume_options *volopt);
|
||||
int scoutfs_client_resize_devices(struct super_block *sb, struct scoutfs_net_resize_devices *nrd);
|
||||
int scoutfs_client_statfs(struct super_block *sb, struct scoutfs_net_statfs *nst);
|
||||
|
||||
void scoutfs_client_net_shutdown(struct super_block *sb);
|
||||
int scoutfs_client_setup(struct super_block *sb);
|
||||
void scoutfs_client_destroy(struct super_block *sb);
|
||||
|
||||
|
||||
@@ -30,11 +30,13 @@
|
||||
EXPAND_COUNTER(block_cache_free) \
|
||||
EXPAND_COUNTER(block_cache_free_work) \
|
||||
EXPAND_COUNTER(block_cache_remove_stale) \
|
||||
EXPAND_COUNTER(block_cache_count_objects) \
|
||||
EXPAND_COUNTER(block_cache_scan_objects) \
|
||||
EXPAND_COUNTER(block_cache_shrink) \
|
||||
EXPAND_COUNTER(block_cache_shrink_next) \
|
||||
EXPAND_COUNTER(block_cache_shrink_recent) \
|
||||
EXPAND_COUNTER(block_cache_shrink_remove) \
|
||||
EXPAND_COUNTER(block_cache_shrink_restart) \
|
||||
EXPAND_COUNTER(block_cache_shrink_stop) \
|
||||
EXPAND_COUNTER(btree_compact_values) \
|
||||
EXPAND_COUNTER(btree_compact_values_enomem) \
|
||||
EXPAND_COUNTER(btree_delete) \
|
||||
@@ -44,6 +46,16 @@
|
||||
EXPAND_COUNTER(btree_insert) \
|
||||
EXPAND_COUNTER(btree_leaf_item_hash_search) \
|
||||
EXPAND_COUNTER(btree_lookup) \
|
||||
EXPAND_COUNTER(btree_merge) \
|
||||
EXPAND_COUNTER(btree_merge_alloc_low) \
|
||||
EXPAND_COUNTER(btree_merge_delete) \
|
||||
EXPAND_COUNTER(btree_merge_delta_combined) \
|
||||
EXPAND_COUNTER(btree_merge_delta_null) \
|
||||
EXPAND_COUNTER(btree_merge_dirty_limit) \
|
||||
EXPAND_COUNTER(btree_merge_drop_old) \
|
||||
EXPAND_COUNTER(btree_merge_insert) \
|
||||
EXPAND_COUNTER(btree_merge_update) \
|
||||
EXPAND_COUNTER(btree_merge_walk) \
|
||||
EXPAND_COUNTER(btree_next) \
|
||||
EXPAND_COUNTER(btree_prev) \
|
||||
EXPAND_COUNTER(btree_split) \
|
||||
@@ -65,8 +77,6 @@
|
||||
EXPAND_COUNTER(data_write_begin_enobufs_retry) \
|
||||
EXPAND_COUNTER(dentry_revalidate_error) \
|
||||
EXPAND_COUNTER(dentry_revalidate_invalid) \
|
||||
EXPAND_COUNTER(dentry_revalidate_locked) \
|
||||
EXPAND_COUNTER(dentry_revalidate_orphan) \
|
||||
EXPAND_COUNTER(dentry_revalidate_rcu) \
|
||||
EXPAND_COUNTER(dentry_revalidate_root) \
|
||||
EXPAND_COUNTER(dentry_revalidate_valid) \
|
||||
@@ -80,9 +90,13 @@
|
||||
EXPAND_COUNTER(forest_read_items) \
|
||||
EXPAND_COUNTER(forest_roots_next_hint) \
|
||||
EXPAND_COUNTER(forest_set_bloom_bits) \
|
||||
EXPAND_COUNTER(item_cache_count_objects) \
|
||||
EXPAND_COUNTER(item_cache_scan_objects) \
|
||||
EXPAND_COUNTER(item_clear_dirty) \
|
||||
EXPAND_COUNTER(item_create) \
|
||||
EXPAND_COUNTER(item_delete) \
|
||||
EXPAND_COUNTER(item_delta) \
|
||||
EXPAND_COUNTER(item_delta_written) \
|
||||
EXPAND_COUNTER(item_dirty) \
|
||||
EXPAND_COUNTER(item_invalidate) \
|
||||
EXPAND_COUNTER(item_invalidate_page) \
|
||||
@@ -111,13 +125,10 @@
|
||||
EXPAND_COUNTER(item_update) \
|
||||
EXPAND_COUNTER(item_write_dirty) \
|
||||
EXPAND_COUNTER(lock_alloc) \
|
||||
EXPAND_COUNTER(lock_count_objects) \
|
||||
EXPAND_COUNTER(lock_free) \
|
||||
EXPAND_COUNTER(lock_grace_extended) \
|
||||
EXPAND_COUNTER(lock_grace_set) \
|
||||
EXPAND_COUNTER(lock_grace_wait) \
|
||||
EXPAND_COUNTER(lock_grant_request) \
|
||||
EXPAND_COUNTER(lock_grant_response) \
|
||||
EXPAND_COUNTER(lock_grant_work) \
|
||||
EXPAND_COUNTER(lock_invalidate_coverage) \
|
||||
EXPAND_COUNTER(lock_invalidate_inode) \
|
||||
EXPAND_COUNTER(lock_invalidate_request) \
|
||||
@@ -128,11 +139,13 @@
|
||||
EXPAND_COUNTER(lock_lock_error) \
|
||||
EXPAND_COUNTER(lock_nonblock_eagain) \
|
||||
EXPAND_COUNTER(lock_recover_request) \
|
||||
EXPAND_COUNTER(lock_scan_objects) \
|
||||
EXPAND_COUNTER(lock_shrink_attempted) \
|
||||
EXPAND_COUNTER(lock_shrink_aborted) \
|
||||
EXPAND_COUNTER(lock_shrink_work) \
|
||||
EXPAND_COUNTER(lock_unlock) \
|
||||
EXPAND_COUNTER(lock_wait) \
|
||||
EXPAND_COUNTER(log_merge_wait_timeout) \
|
||||
EXPAND_COUNTER(net_dropped_response) \
|
||||
EXPAND_COUNTER(net_send_bytes) \
|
||||
EXPAND_COUNTER(net_send_error) \
|
||||
@@ -143,6 +156,15 @@
|
||||
EXPAND_COUNTER(net_recv_invalid_message) \
|
||||
EXPAND_COUNTER(net_recv_messages) \
|
||||
EXPAND_COUNTER(net_unknown_request) \
|
||||
EXPAND_COUNTER(orphan_scan) \
|
||||
EXPAND_COUNTER(orphan_scan_attempts) \
|
||||
EXPAND_COUNTER(orphan_scan_cached) \
|
||||
EXPAND_COUNTER(orphan_scan_error) \
|
||||
EXPAND_COUNTER(orphan_scan_item) \
|
||||
EXPAND_COUNTER(orphan_scan_omap_set) \
|
||||
EXPAND_COUNTER(quota_info_count_objects) \
|
||||
EXPAND_COUNTER(quota_info_scan_objects) \
|
||||
EXPAND_COUNTER(quorum_candidate_server_stopping) \
|
||||
EXPAND_COUNTER(quorum_elected) \
|
||||
EXPAND_COUNTER(quorum_fence_error) \
|
||||
EXPAND_COUNTER(quorum_fence_leader) \
|
||||
@@ -153,6 +175,7 @@
|
||||
EXPAND_COUNTER(quorum_recv_resignation) \
|
||||
EXPAND_COUNTER(quorum_recv_vote) \
|
||||
EXPAND_COUNTER(quorum_send_heartbeat) \
|
||||
EXPAND_COUNTER(quorum_send_heartbeat_dropped) \
|
||||
EXPAND_COUNTER(quorum_send_resignation) \
|
||||
EXPAND_COUNTER(quorum_send_request) \
|
||||
EXPAND_COUNTER(quorum_send_vote) \
|
||||
@@ -164,6 +187,7 @@
|
||||
EXPAND_COUNTER(srch_add_entry) \
|
||||
EXPAND_COUNTER(srch_compact_dirty_block) \
|
||||
EXPAND_COUNTER(srch_compact_entry) \
|
||||
EXPAND_COUNTER(srch_compact_error) \
|
||||
EXPAND_COUNTER(srch_compact_flush) \
|
||||
EXPAND_COUNTER(srch_compact_log_page) \
|
||||
EXPAND_COUNTER(srch_compact_removed_entry) \
|
||||
@@ -173,21 +197,23 @@
|
||||
EXPAND_COUNTER(srch_search_retry_empty) \
|
||||
EXPAND_COUNTER(srch_search_sorted) \
|
||||
EXPAND_COUNTER(srch_search_sorted_block) \
|
||||
EXPAND_COUNTER(srch_search_stale_eio) \
|
||||
EXPAND_COUNTER(srch_search_stale_retry) \
|
||||
EXPAND_COUNTER(srch_search_xattrs) \
|
||||
EXPAND_COUNTER(srch_read_stale) \
|
||||
EXPAND_COUNTER(statfs) \
|
||||
EXPAND_COUNTER(totl_read_copied) \
|
||||
EXPAND_COUNTER(totl_read_item) \
|
||||
EXPAND_COUNTER(trans_commit_data_alloc_low) \
|
||||
EXPAND_COUNTER(trans_commit_dirty_meta_full) \
|
||||
EXPAND_COUNTER(trans_commit_fsync) \
|
||||
EXPAND_COUNTER(trans_commit_meta_alloc_low) \
|
||||
EXPAND_COUNTER(trans_commit_sync_fs) \
|
||||
EXPAND_COUNTER(trans_commit_timer) \
|
||||
EXPAND_COUNTER(trans_commit_written)
|
||||
EXPAND_COUNTER(trans_commit_written) \
|
||||
EXPAND_COUNTER(wkic_count_objects) \
|
||||
EXPAND_COUNTER(wkic_scan_objects)
|
||||
|
||||
#define FIRST_COUNTER alloc_alloc_data
|
||||
#define LAST_COUNTER trans_commit_written
|
||||
#define LAST_COUNTER wkic_scan_objects
|
||||
|
||||
#undef EXPAND_COUNTER
|
||||
#define EXPAND_COUNTER(which) struct percpu_counter which;
|
||||
@@ -214,12 +240,12 @@ struct scoutfs_counters {
|
||||
#define SCOUTFS_PCPU_COUNTER_BATCH (1 << 30)
|
||||
|
||||
#define scoutfs_inc_counter(sb, which) \
|
||||
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, 1, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, 1, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
|
||||
#define scoutfs_add_counter(sb, which, cnt) \
|
||||
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, cnt, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, cnt, \
|
||||
SCOUTFS_PCPU_COUNTER_BATCH)
|
||||
|
||||
void __init scoutfs_init_counters(void);
|
||||
int scoutfs_setup_counters(struct super_block *sb);
|
||||
|
||||
820 kmod/src/data.c
File diff suppressed because it is too large
@@ -38,18 +38,14 @@ struct scoutfs_data_wait {
|
||||
.err = 0, \
|
||||
}
|
||||
|
||||
struct scoutfs_traced_extent {
|
||||
u64 iblock;
|
||||
u64 count;
|
||||
u64 blkno;
|
||||
u8 flags;
|
||||
};
|
||||
|
||||
extern const struct address_space_operations scoutfs_file_aops;
|
||||
extern const struct file_operations scoutfs_file_fops;
|
||||
struct scoutfs_alloc;
|
||||
struct scoutfs_block_writer;
|
||||
|
||||
int scoutfs_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh,
|
||||
int create);
|
||||
|
||||
int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
|
||||
u64 ino, u64 iblock, u64 last, bool offline,
|
||||
struct scoutfs_lock *lock);
|
||||
@@ -59,7 +55,8 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len);
|
||||
int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
|
||||
u64 byte_len, struct inode *to, u64 to_off);
|
||||
u64 byte_len, struct inode *to, u64 to_off, bool to_stage,
|
||||
u64 data_version);
|
||||
|
||||
int scoutfs_data_wait_check(struct inode *inode, loff_t pos, loff_t len,
|
||||
u8 sef, u8 op, struct scoutfs_data_wait *ow,
|
||||
@@ -85,7 +82,7 @@ void scoutfs_data_init_btrees(struct super_block *sb,
|
||||
void scoutfs_data_get_btrees(struct super_block *sb,
|
||||
struct scoutfs_log_trees *lt);
|
||||
int scoutfs_data_prepare_commit(struct super_block *sb);
|
||||
u64 scoutfs_data_alloc_free_bytes(struct super_block *sb);
|
||||
bool scoutfs_data_alloc_should_refill(struct super_block *sb, u64 blocks);
|
||||
|
||||
int scoutfs_data_setup(struct super_block *sb);
|
||||
void scoutfs_data_destroy(struct super_block *sb);
|
||||
|
||||
1060 kmod/src/dir.c
File diff suppressed because it is too large
@@ -5,16 +5,24 @@
|
||||
#include "lock.h"
|
||||
|
||||
extern const struct file_operations scoutfs_dir_fops;
|
||||
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
extern const struct inode_operations_wrapper scoutfs_dir_iops;
|
||||
#else
|
||||
extern const struct inode_operations scoutfs_dir_iops;
|
||||
#endif
|
||||
extern const struct inode_operations scoutfs_symlink_iops;
|
||||
|
||||
extern const struct dentry_operations scoutfs_dentry_ops;
|
||||
|
||||
struct scoutfs_link_backref_entry {
|
||||
struct list_head head;
|
||||
u64 dir_ino;
|
||||
u64 dir_pos;
|
||||
u16 name_len;
|
||||
u8 d_type;
|
||||
bool last;
|
||||
struct scoutfs_dirent dent;
|
||||
/* the full name is allocated and stored in dent.name[0] */
|
||||
/* the full name is allocated and stored in dent.name[] */
|
||||
};
|
||||
|
||||
int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
|
||||
@@ -22,14 +30,10 @@ int scoutfs_dir_get_backref_path(struct super_block *sb, u64 ino, u64 dir_ino,
|
||||
void scoutfs_dir_free_backref_path(struct super_block *sb,
|
||||
struct list_head *list);
|
||||
|
||||
int scoutfs_dir_add_next_linkref(struct super_block *sb, u64 ino,
|
||||
u64 dir_ino, u64 dir_pos,
|
||||
struct list_head *list);
|
||||
int scoutfs_dir_add_next_linkrefs(struct super_block *sb, u64 ino, u64 dir_ino, u64 dir_pos,
|
||||
int count, struct list_head *list);
|
||||
|
||||
int scoutfs_symlink_drop(struct super_block *sb, u64 ino,
|
||||
struct scoutfs_lock *lock, u64 i_size);
|
||||
|
||||
int scoutfs_dir_init(void);
|
||||
void scoutfs_dir_exit(void);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -81,7 +81,7 @@ static struct dentry *scoutfs_fh_to_dentry(struct super_block *sb,
|
||||
trace_scoutfs_fh_to_dentry(sb, fh_type, sfid);
|
||||
|
||||
if (scoutfs_valid_fileid(fh_type))
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino));
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->ino), 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -100,7 +100,7 @@ static struct dentry *scoutfs_fh_to_parent(struct super_block *sb,
|
||||
|
||||
if (scoutfs_valid_fileid(fh_type) &&
|
||||
fh_type == FILEID_SCOUTFS_WITH_PARENT)
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino));
|
||||
inode = scoutfs_iget(sb, le64_to_cpu(sfid->parent_ino), 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -114,8 +114,8 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
|
||||
int ret;
|
||||
u64 ino;
|
||||
|
||||
ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), 0, 0, &list);
|
||||
if (ret)
|
||||
ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), 0, 0, 1, &list);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ent = list_first_entry(&list, struct scoutfs_link_backref_entry, head);
|
||||
@@ -123,7 +123,7 @@ static struct dentry *scoutfs_get_parent(struct dentry *child)
|
||||
scoutfs_dir_free_backref_path(sb, &list);
|
||||
trace_scoutfs_get_parent(sb, inode, ino);
|
||||
|
||||
inode = scoutfs_iget(sb, ino);
|
||||
inode = scoutfs_iget(sb, ino, 0, SCOUTFS_IGF_LINKED);
|
||||
|
||||
return d_obtain_alias(inode);
|
||||
}
|
||||
@@ -138,9 +138,9 @@ static int scoutfs_get_name(struct dentry *parent, char *name,
|
||||
LIST_HEAD(list);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_dir_add_next_linkref(sb, scoutfs_ino(inode), dir_ino,
|
||||
0, &list);
|
||||
if (ret)
|
||||
ret = scoutfs_dir_add_next_linkrefs(sb, scoutfs_ino(inode), dir_ino,
|
||||
0, 1, &list);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = -ENOENT;
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
#include "msg.h"
|
||||
#include "ext.h"
|
||||
#include "counters.h"
|
||||
#include "scoutfs_trace.h"
|
||||
@@ -38,7 +39,7 @@ static bool ext_overlap(struct scoutfs_extent *ext, u64 start, u64 len)
|
||||
return !(e_end < start || ext->start > end);
|
||||
}
|
||||
|
||||
static bool ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
|
||||
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out)
|
||||
{
|
||||
u64 in_end = start + len - 1;
|
||||
u64 out_end = out->start + out->len - 1;
|
||||
@@ -191,6 +192,9 @@ int scoutfs_ext_insert(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
|
||||
/* inserting extent must not overlap */
|
||||
if (found.len && ext_overlap(&ins, found.start, found.len)) {
|
||||
if (ops->insert_overlap_warn)
|
||||
scoutfs_err(sb, "inserting extent %llu.%llu overlaps existing %llu.%llu",
|
||||
start, len, found.start, found.len);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -241,7 +245,9 @@ int scoutfs_ext_remove(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
goto out;
|
||||
|
||||
/* removed extent must be entirely within found */
|
||||
if (!ext_inside(start, len, &found)) {
|
||||
if (!scoutfs_ext_inside(start, len, &found)) {
|
||||
scoutfs_err(sb, "error removing extent %llu.%llu, isn't inside existing %llu.%llu",
|
||||
start, len, found.start, found.len);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@@ -341,7 +347,7 @@ int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
|
||||
if (ret == 0 && ext_overlap(&found, start, len)) {
|
||||
/* set extent must be entirely within found */
|
||||
if (!ext_inside(start, len, &found)) {
|
||||
if (!scoutfs_ext_inside(start, len, &found)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@ struct scoutfs_ext_ops {
|
||||
u64 start, u64 len, u64 map, u8 flags);
|
||||
int (*remove)(struct super_block *sb, void *arg, u64 start, u64 len,
|
||||
u64 map, u8 flags);
|
||||
|
||||
bool insert_overlap_warn;
|
||||
};
|
||||
|
||||
bool scoutfs_ext_can_merge(struct scoutfs_extent *left,
|
||||
@@ -31,5 +33,6 @@ int scoutfs_ext_alloc(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
struct scoutfs_extent *ext);
|
||||
int scoutfs_ext_set(struct super_block *sb, struct scoutfs_ext_ops *ops,
|
||||
void *arg, u64 start, u64 len, u64 map, u8 flags);
|
||||
bool scoutfs_ext_inside(u64 start, u64 len, struct scoutfs_extent *out);
|
||||
|
||||
#endif
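For intuition about the inclusive-end arithmetic used by ext_overlap() and scoutfs_ext_inside() above, here is a small standalone sketch with concrete numbers. The inside() body is inferred from its callers ("removed/set extent must be entirely within found"), not copied from the elided function body.

#include <stdbool.h>
#include <stdint.h>

struct ext { uint64_t start; uint64_t len; };

static bool overlap(const struct ext *e, uint64_t start, uint64_t len)
{
	uint64_t e_end = e->start + e->len - 1;
	uint64_t end = start + len - 1;

	return !(e_end < start || e->start > end);
}

static bool inside(uint64_t start, uint64_t len, const struct ext *out)
{
	uint64_t in_end = start + len - 1;
	uint64_t out_end = out->start + out->len - 1;

	return start >= out->start && in_end <= out_end;
}

/* With out = {100, 50} covering blocks 100..149: inside(110, 10, &out) is true,
 * overlap(&out, 140, 20) is true, and overlap(&out, 160, 5) is false. */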
|
||||
|
||||
481
kmod/src/fence.c
Normal file
@@ -0,0 +1,481 @@
|
||||
/*
|
||||
* Copyright (C) 2019 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/timer.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "msg.h"
|
||||
#include "sysfs.h"
|
||||
#include "server.h"
|
||||
#include "fence.h"
|
||||
|
||||
/*
|
||||
* Fencing ensures that a given mount can no longer write to the
|
||||
* metadata or data devices. It's necessary to ensure that it's safe to
|
||||
* give another mount access to a resource that is currently owned by a
|
||||
* mount that has stopped responding.
|
||||
*
|
||||
* Fencing is performed in collaboration between the currently elected
|
||||
* quorum leader mount and userspace running on its host. The kernel
|
||||
* creates fencing requests as it notices that mounts have stopped
|
||||
* participating. The fence requests are published as directories in
|
||||
* sysfs. Userspace agents watch for directories, take action, and
|
||||
* write to files in the directory to indicate that the mount has been
|
||||
* fenced. Once the mount is fenced the server can reclaim the
|
||||
* resources previously held by the fenced mount.
|
||||
*
|
||||
* The fence requests contain metadata identifying the specific instance
|
||||
* of the mount that needs to be fenced. This lets a fencing agent
|
||||
* ensure that a specific mount has been fenced without necessarily
|
||||
* destroying the node that was hosting it. Maybe the node had rebooted
|
||||
* and the mount is no longer there, maybe the mount can be force
|
||||
* unmounted, maybe the node can be configured to isolate the mount from
|
||||
* the devices.
|
||||
*
|
||||
* The fencing mechanism is asynchronous and can fail but the server
|
||||
* cannot make progress until it completes. If a fence request times
|
||||
* out the server shuts down in the hope that another instance of a
|
||||
* server might have more luck fencing a non-responsive mount.
|
||||
*
|
||||
* Sources of fencing are fundamentally anchored in shared persistent
|
||||
* state. It is possible, though unlikely, that servers can fence a
|
||||
* node and then themselves fail, leaving the next server to try and
|
||||
* fence the mount again.
|
||||
*/
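For intuition about the userspace half of this handshake, here is a minimal agent sketch. It is hedged: the per-request directory path, write_attr() and handle_fence_request() are assumptions made for the example; only the behavior (any write to "fenced" marks success, any write to "error" reports failure) comes from the comments in this file.

/* Hypothetical userspace fencing agent sketch; paths and helper names are assumptions. */
#include <limits.h>
#include <stdio.h>

static int write_attr(const char *dir, const char *attr, const char *val)
{
	char path[PATH_MAX];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

/* dir is the fence request directory named after the rid being fenced */
int handle_fence_request(const char *dir, int isolated)
{
	if (isolated)
		return write_attr(dir, "fenced", "1");	/* mount can no longer write */
	return write_attr(dir, "error", "1");		/* fencing attempt failed */
}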
|
||||
|
||||
struct fence_info {
|
||||
struct kset *kset;
|
||||
struct kobject fence_dir_kobj;
|
||||
struct workqueue_struct *wq;
|
||||
wait_queue_head_t waitq;
|
||||
spinlock_t lock;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
#define DECLARE_FENCE_INFO(sb, name) \
|
||||
struct fence_info *name = SCOUTFS_SB(sb)->fence_info
|
||||
|
||||
struct pending_fence {
|
||||
struct super_block *sb;
|
||||
struct scoutfs_sysfs_attrs ssa;
|
||||
struct list_head entry;
|
||||
struct timer_list timer;
|
||||
|
||||
ktime_t start_kt;
|
||||
__be32 ipv4_addr;
|
||||
bool fenced;
|
||||
bool error;
|
||||
int reason;
|
||||
u64 rid;
|
||||
};
|
||||
|
||||
#define FENCE_FROM_KOBJ(kobj) \
|
||||
container_of(SCOUTFS_SYSFS_ATTRS(kobj), struct pending_fence, ssa)
|
||||
#define DECLARE_FENCE_FROM_KOBJ(name, kobj) \
|
||||
struct pending_fence *name = FENCE_FROM_KOBJ(kobj)
|
||||
|
||||
static void destroy_fence(struct pending_fence *fence)
|
||||
{
|
||||
struct super_block *sb = fence->sb;
|
||||
|
||||
scoutfs_sysfs_destroy_attrs(sb, &fence->ssa);
|
||||
del_timer_sync(&fence->timer);
|
||||
kfree(fence);
|
||||
}
|
||||
|
||||
static ssize_t elapsed_secs_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
ktime_t now = ktime_get();
|
||||
ktime_t t = ns_to_ktime(0);
|
||||
|
||||
if (ktime_after(now, fence->start_kt))
|
||||
t = ktime_sub(now, fence->start_kt);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", (long long)ktime_divns(t, NSEC_PER_SEC));
|
||||
}
|
||||
SCOUTFS_ATTR_RO(elapsed_secs);
|
||||
|
||||
static ssize_t fenced_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", !!fence->fenced);
|
||||
}
|
||||
|
||||
/*
|
||||
* any write to the fenced file from userspace indicates that the mount
|
||||
* has been safely fenced and can no longer write to the shared device.
|
||||
*/
|
||||
static ssize_t fenced_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
DECLARE_FENCE_INFO(fence->sb, fi);
|
||||
|
||||
if (!fence->fenced) {
|
||||
del_timer_sync(&fence->timer);
|
||||
fence->fenced = true;
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(fenced);
|
||||
|
||||
static ssize_t error_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", !!fence->error);
|
||||
}
|
||||
|
||||
/*
|
||||
* The fencing agent can tell us that it was unable to fence the given mount.
|
||||
* We can't continue if the mount can't be isolated so we shut down the
|
||||
* server.
|
||||
*/
|
||||
static ssize_t error_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
struct super_block *sb = fence->sb;
|
||||
DECLARE_FENCE_INFO(fence->sb, fi);
|
||||
|
||||
if (!fence->error) {
|
||||
fence->error = true;
|
||||
scoutfs_err(sb, "error indicated by fence action for rid %016llx", fence->rid);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(error);
|
||||
|
||||
static ssize_t ipv4_addr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%pI4", &fence->ipv4_addr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(ipv4_addr);
|
||||
|
||||
static ssize_t reason_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
unsigned r = fence->reason;
|
||||
char *str = "unknown";
|
||||
static char *reasons[] = {
|
||||
[SCOUTFS_FENCE_CLIENT_RECOVERY] = "client_recovery",
|
||||
[SCOUTFS_FENCE_CLIENT_RECONNECT] = "client_reconnect",
|
||||
[SCOUTFS_FENCE_QUORUM_BLOCK_LEADER] = "quorum_block_leader",
|
||||
};
|
||||
|
||||
if (r < ARRAY_SIZE(reasons) && reasons[r])
|
||||
str = reasons[r];
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", str);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(reason);
|
||||
|
||||
static ssize_t rid_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
DECLARE_FENCE_FROM_KOBJ(fence, kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx", fence->rid);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(rid);
|
||||
|
||||
static struct attribute *fence_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(elapsed_secs),
|
||||
SCOUTFS_ATTR_PTR(fenced),
|
||||
SCOUTFS_ATTR_PTR(error),
|
||||
SCOUTFS_ATTR_PTR(ipv4_addr),
|
||||
SCOUTFS_ATTR_PTR(reason),
|
||||
SCOUTFS_ATTR_PTR(rid),
|
||||
NULL,
|
||||
};
|
||||
|
||||
#define FENCE_TIMEOUT_MS (MSEC_PER_SEC * 30)
|
||||
|
||||
static void fence_timeout(struct timer_list *timer)
|
||||
{
|
||||
struct pending_fence *fence = from_timer(fence, timer, timer);
|
||||
struct super_block *sb = fence->sb;
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
|
||||
fence->error = true;
|
||||
scoutfs_err(sb, "fence request for rid %016llx was not serviced in %lums, raising error",
|
||||
fence->rid, FENCE_TIMEOUT_MS);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret;
|
||||
|
||||
fence = kzalloc(sizeof(struct pending_fence), GFP_NOFS);
|
||||
if (!fence) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fence->sb = sb;
|
||||
scoutfs_sysfs_init_attrs(sb, &fence->ssa);
|
||||
|
||||
fence->start_kt = ktime_get();
|
||||
fence->ipv4_addr = ipv4_addr;
|
||||
fence->fenced = false;
|
||||
fence->error = false;
|
||||
fence->reason = reason;
|
||||
fence->rid = rid;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs_parent(sb, &fi->kset->kobj,
|
||||
&fence->ssa, fence_attrs,
|
||||
"%016llx", rid);
|
||||
if (ret < 0) {
|
||||
kfree(fence);
|
||||
goto out;
|
||||
}
|
||||
|
||||
timer_setup(&fence->timer, fence_timeout, 0);
|
||||
fence->timer.expires = jiffies + msecs_to_jiffies(FENCE_TIMEOUT_MS);
|
||||
add_timer(&fence->timer);
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_add_tail(&fence->entry, &fi->list);
|
||||
spin_unlock(&fi->lock);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Give the caller the rid of the next fence request which has been
|
||||
* fenced.  This doesn't take a position from which to return the next request
|
||||
* because the caller either frees the fence request it's given or shuts
|
||||
* down.
|
||||
*/
|
||||
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret = -ENOENT;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->fenced || fence->error) {
|
||||
*rid = fence->rid;
|
||||
*reason = fence->reason;
|
||||
*error = fence->error;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return ret;
|
||||
}
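The comments above imply a consumption loop on the server side; the following is a hedged sketch of that loop. reclaim_rid_resources() and shut_down_server() are hypothetical placeholders for the server's reclaim and shutdown paths, not functions in this module.

/* Sketch: walk completed fence requests, reclaim their resources, then free them. */
static int consume_fenced_requests(struct super_block *sb)
{
	int reason;
	bool error;
	u64 rid;
	int ret;

	while ((ret = scoutfs_fence_next(sb, &rid, &reason, &error)) == 0) {
		if (error) {
			shut_down_server(sb);		/* hypothetical */
			return -EIO;
		}
		reclaim_rid_resources(sb, rid);		/* hypothetical */
		scoutfs_fence_free(sb, rid);
	}

	return ret == -ENOENT ? 0 : ret;
}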
|
||||
|
||||
int scoutfs_fence_reason_pending(struct super_block *sb, int reason)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
bool pending = false;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->reason == reason) {
|
||||
pending = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return pending;
|
||||
}
|
||||
|
||||
int scoutfs_fence_free(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
int ret = -ENOENT;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->rid == rid) {
|
||||
list_del_init(&fence->entry);
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
if (ret == 0) {
|
||||
destroy_fence(fence);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool all_fenced(struct fence_info *fi, bool *error)
|
||||
{
|
||||
struct pending_fence *fence;
|
||||
bool all = true;
|
||||
|
||||
*error = false;
|
||||
|
||||
spin_lock(&fi->lock);
|
||||
list_for_each_entry(fence, &fi->list, entry) {
|
||||
if (fence->error) {
|
||||
*error = true;
|
||||
all = true;
|
||||
break;
|
||||
}
|
||||
if (!fence->fenced) {
|
||||
all = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
return all;
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller waits for all the current requests to be fenced, but not
|
||||
* necessarily reclaimed.
|
||||
*/
|
||||
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
bool error;
|
||||
long ret;
|
||||
|
||||
ret = wait_event_timeout(fi->waitq, all_fenced(fi, &error), timeout_jiffies);
|
||||
if (ret == 0)
|
||||
ret = -ETIMEDOUT;
|
||||
else if (ret > 0)
|
||||
ret = 0;
|
||||
else if (error)
|
||||
ret = -EIO;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This must be called early during startup so that it is guaranteed that
|
||||
* no other subsystems will try and call fence_start while we're waiting
|
||||
* for testing fence requests to complete.
|
||||
*/
|
||||
int scoutfs_fence_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_mount_options opts;
|
||||
struct fence_info *fi;
|
||||
int ret;
|
||||
|
||||
/* can only fence if we can be elected by quorum */
|
||||
scoutfs_options_read(sb, &opts);
|
||||
if (opts.quorum_slot_nr == -1) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fi = kzalloc(sizeof(struct fence_info), GFP_KERNEL);
|
||||
if (!fi) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
init_waitqueue_head(&fi->waitq);
|
||||
spin_lock_init(&fi->lock);
|
||||
INIT_LIST_HEAD(&fi->list);
|
||||
|
||||
sbi->fence_info = fi;
|
||||
|
||||
fi->kset = kset_create_and_add("fence", NULL, scoutfs_sysfs_sb_dir(sb));
|
||||
if (!fi->kset) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fi->wq = alloc_workqueue("scoutfs_fence",
|
||||
WQ_UNBOUND | WQ_NON_REENTRANT, 0);
|
||||
if (!fi->wq) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret)
|
||||
scoutfs_fence_destroy(sb);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tear down all pending fence requests because the server is shutting down.
|
||||
*/
|
||||
void scoutfs_fence_stop(struct super_block *sb)
|
||||
{
|
||||
DECLARE_FENCE_INFO(sb, fi);
|
||||
struct pending_fence *fence;
|
||||
|
||||
do {
|
||||
spin_lock(&fi->lock);
|
||||
fence = list_first_entry_or_null(&fi->list, struct pending_fence, entry);
|
||||
if (fence)
|
||||
list_del_init(&fence->entry);
|
||||
spin_unlock(&fi->lock);
|
||||
|
||||
if (fence) {
|
||||
destroy_fence(fence);
|
||||
wake_up(&fi->waitq);
|
||||
}
|
||||
} while (fence);
|
||||
}
|
||||
|
||||
void scoutfs_fence_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct fence_info *fi = SCOUTFS_SB(sb)->fence_info;
|
||||
struct pending_fence *fence;
|
||||
struct pending_fence *tmp;
|
||||
|
||||
if (fi) {
|
||||
if (fi->wq)
|
||||
destroy_workqueue(fi->wq);
|
||||
list_for_each_entry_safe(fence, tmp, &fi->list, entry)
|
||||
destroy_fence(fence);
|
||||
if (fi->kset)
|
||||
kset_unregister(fi->kset);
|
||||
kfree(fi);
|
||||
sbi->fence_info = NULL;
|
||||
}
|
||||
}
|
||||
20
kmod/src/fence.h
Normal file
@@ -0,0 +1,20 @@
|
||||
#ifndef _SCOUTFS_FENCE_H_
|
||||
#define _SCOUTFS_FENCE_H_
|
||||
|
||||
enum {
|
||||
SCOUTFS_FENCE_CLIENT_RECOVERY,
|
||||
SCOUTFS_FENCE_CLIENT_RECONNECT,
|
||||
SCOUTFS_FENCE_QUORUM_BLOCK_LEADER,
|
||||
};
|
||||
|
||||
int scoutfs_fence_start(struct super_block *sb, u64 rid, __be32 ipv4_addr, int reason);
|
||||
int scoutfs_fence_next(struct super_block *sb, u64 *rid, int *reason, bool *error);
|
||||
int scoutfs_fence_reason_pending(struct super_block *sb, int reason);
|
||||
int scoutfs_fence_free(struct super_block *sb, u64 rid);
|
||||
int scoutfs_fence_wait_fenced(struct super_block *sb, long timeout_jiffies);
|
||||
|
||||
int scoutfs_fence_setup(struct super_block *sb);
|
||||
void scoutfs_fence_stop(struct super_block *sb);
|
||||
void scoutfs_fence_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
176
kmod/src/file.c
@@ -27,8 +27,16 @@
|
||||
#include "file.h"
|
||||
#include "inode.h"
|
||||
#include "per_task.h"
|
||||
#include "omap.h"
|
||||
#include "quota.h"
|
||||
|
||||
/* TODO: Direct I/O, AIO */
|
||||
#ifdef KC_LINUX_HAVE_FOP_AIO_READ
|
||||
/*
|
||||
* Start a high level file read. We check for offline extents in the
|
||||
* read region here so that we only check the extents once. We use the
|
||||
* dio count to prevent releasing while we're reading after we've
|
||||
* checked the extents.
|
||||
*/
|
||||
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos)
|
||||
{
|
||||
@@ -36,37 +44,39 @@ ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
|
||||
retry:
|
||||
/* protect checked extents from release */
|
||||
inode_lock(inode);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
inode_unlock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
|
||||
/* protect checked extents from stage/release */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
ret = scoutfs_data_wait_check_iov(inode, iov, nr_segs, pos,
|
||||
SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_READ,
|
||||
&dw, inode_lock);
|
||||
&dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
} else {
|
||||
WARN_ON_ONCE(true);
|
||||
}
|
||||
|
||||
ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
|
||||
|
||||
out:
|
||||
if (scoutfs_per_task_del(&si->pt_data_lock, &pt_ent))
|
||||
inode_dio_done(inode);
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
inode_dio_done(inode);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
@@ -84,7 +94,7 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
@@ -93,34 +103,42 @@ ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
return 0;
|
||||
|
||||
retry:
|
||||
mutex_lock(&inode->i_mutex);
|
||||
inode_lock(inode);
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &inode_lock);
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, inode_lock);
|
||||
ret = scoutfs_inode_check_retention(inode);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, inode_lock)) {
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
/* data_version is per inode, whole file must be online */
|
||||
ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode),
|
||||
SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_WRITE,
|
||||
&dw, inode_lock);
|
||||
&dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_quota_check_data(sb, inode);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* XXX: remove SUID bit */
|
||||
|
||||
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
|
||||
|
||||
out:
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
@@ -138,8 +156,119 @@ out:
|
||||
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
ssize_t scoutfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
int ret;
|
||||
|
||||
int scoutfs_permission(struct inode *inode, int mask)
|
||||
retry:
|
||||
/* protect checked extents from release */
|
||||
inode_lock(inode);
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
inode_unlock(inode);
|
||||
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_READ,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
ret = scoutfs_data_wait_check(inode, iocb->ki_pos, iov_iter_count(to), SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_READ, &dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
} else {
|
||||
WARN_ON_ONCE(true);
|
||||
}
|
||||
|
||||
ret = generic_file_read_iter(iocb, to);
|
||||
|
||||
out:
|
||||
inode_dio_end(inode);
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ssize_t scoutfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *scoutfs_inode_lock = NULL;
|
||||
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
|
||||
DECLARE_DATA_WAIT(dw);
|
||||
ssize_t ret;
|
||||
|
||||
retry:
|
||||
inode_lock(inode);
|
||||
ret = scoutfs_lock_inode(sb, SCOUTFS_LOCK_WRITE,
|
||||
SCOUTFS_LKF_REFRESH_INODE, inode, &scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = generic_write_checks(iocb, from);
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_inode_check_retention(inode);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_complete_truncate(inode, scoutfs_inode_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_quota_check_data(sb, inode);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
|
||||
/* data_version is per inode, whole file must be online */
|
||||
ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode), SEF_OFFLINE,
|
||||
SCOUTFS_IOC_DWO_WRITE, &dw, scoutfs_inode_lock);
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX: remove SUID bit */
|
||||
|
||||
ret = __generic_file_write_iter(iocb, from);
|
||||
|
||||
out:
|
||||
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
|
||||
scoutfs_unlock(sb, scoutfs_inode_lock, SCOUTFS_LOCK_WRITE);
|
||||
inode_unlock(inode);
|
||||
|
||||
if (scoutfs_data_wait_found(&dw)) {
|
||||
ret = scoutfs_data_wait(inode, &dw);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
if (ret > 0)
|
||||
ret = generic_write_sync(iocb, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
int scoutfs_permission(KC_VFS_NS_DEF
|
||||
struct inode *inode, int mask)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct scoutfs_lock *inode_lock = NULL;
|
||||
@@ -153,7 +282,8 @@ int scoutfs_permission(struct inode *inode, int mask)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = generic_permission(inode, mask);
|
||||
ret = generic_permission(KC_VFS_INIT_NS
|
||||
inode, mask);
|
||||
|
||||
scoutfs_unlock(sb, inode_lock, SCOUTFS_LOCK_READ);
|
||||
|
||||
|
||||
@@ -1,11 +1,17 @@
|
||||
#ifndef _SCOUTFS_FILE_H_
|
||||
#define _SCOUTFS_FILE_H_
|
||||
|
||||
#ifdef KC_LINUX_HAVE_FOP_AIO_READ
|
||||
ssize_t scoutfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos);
|
||||
ssize_t scoutfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos);
|
||||
int scoutfs_permission(struct inode *inode, int mask);
|
||||
#else
|
||||
ssize_t scoutfs_file_read_iter(struct kiocb *, struct iov_iter *);
|
||||
ssize_t scoutfs_file_write_iter(struct kiocb *, struct iov_iter *);
|
||||
#endif
|
||||
int scoutfs_permission(KC_VFS_NS_DEF
|
||||
struct inode *inode, int mask);
|
||||
loff_t scoutfs_file_llseek(struct file *file, loff_t offset, int whence);
|
||||
|
||||
#endif /* _SCOUTFS_FILE_H_ */
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include "hash.h"
|
||||
#include "srch.h"
|
||||
#include "counters.h"
|
||||
#include "xattr.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
/*
|
||||
@@ -37,9 +38,9 @@
|
||||
*
|
||||
* The log btrees are modified by multiple transactions over time so
|
||||
* there is no consistent ordering relationship between the items in
|
||||
* different btrees. Each item in a log btree stores a version number
|
||||
* for the item. Readers check log btrees for the most recent version
|
||||
* that it should use.
|
||||
* different btrees. Each item in a log btree stores a seq for the
|
||||
* item.  Readers check log btrees for the most recent seq that they
|
||||
* should use.
|
||||
*
|
||||
* The item cache reads items in bulk from stable btrees, and writes a
|
||||
* transaction's worth of dirty items into the item log btree.
|
||||
@@ -52,6 +53,8 @@
|
||||
*/
|
||||
|
||||
struct forest_info {
|
||||
struct super_block *sb;
|
||||
|
||||
struct mutex mutex;
|
||||
struct scoutfs_alloc *alloc;
|
||||
struct scoutfs_block_writer *wri;
|
||||
@@ -60,6 +63,11 @@ struct forest_info {
|
||||
struct mutex srch_mutex;
|
||||
struct scoutfs_srch_file srch_file;
|
||||
struct scoutfs_block *srch_bl;
|
||||
|
||||
struct workqueue_struct *workq;
|
||||
struct delayed_work log_merge_dwork;
|
||||
|
||||
atomic64_t inode_count_delta;
|
||||
};
|
||||
|
||||
#define DECLARE_FOREST_INFO(sb, name) \
|
||||
@@ -70,11 +78,6 @@ struct forest_refs {
|
||||
struct scoutfs_block_ref logs_ref;
|
||||
};
|
||||
|
||||
/* initialize some refs that initially aren't equal */
|
||||
#define DECLARE_STALE_TRACKING_SUPER_REFS(a, b) \
|
||||
struct forest_refs a = {{cpu_to_le64(0),}}; \
|
||||
struct forest_refs b = {{cpu_to_le64(1),}}
|
||||
|
||||
struct forest_bloom_nrs {
|
||||
unsigned int nrs[SCOUTFS_FOREST_BLOOM_NRS];
|
||||
};
|
||||
@@ -128,11 +131,11 @@ static struct scoutfs_block *read_bloom_ref(struct super_block *sb, struct scout
|
||||
int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *next)
|
||||
{
|
||||
DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_btree_root item_root;
|
||||
struct scoutfs_log_trees *lt;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
DECLARE_SAVED_REFS(saved);
|
||||
struct scoutfs_key found;
|
||||
struct scoutfs_key ltk;
|
||||
bool checked_fs;
|
||||
@@ -147,8 +150,6 @@ retry:
|
||||
goto out;
|
||||
|
||||
trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
|
||||
refs.fs_ref = roots.fs_root.ref;
|
||||
refs.logs_ref = roots.logs_root.ref;
|
||||
|
||||
scoutfs_key_init_log_trees(<k, 0, 0);
|
||||
checked_fs = false;
|
||||
@@ -204,37 +205,25 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0)
|
||||
return -EIO;
|
||||
prev_refs = refs;
|
||||
ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.fs_root.ref, &roots.logs_root.ref);
|
||||
if (ret == -ESTALE)
|
||||
goto retry;
|
||||
}
|
||||
out:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct forest_read_items_data {
|
||||
bool is_fs;
|
||||
int fic;
|
||||
scoutfs_forest_item_cb cb;
|
||||
void *cb_arg;
|
||||
};
|
||||
|
||||
static int forest_read_items(struct super_block *sb, struct scoutfs_key *key,
|
||||
static int forest_read_items(struct super_block *sb, struct scoutfs_key *key, u64 seq, u8 flags,
|
||||
void *val, int val_len, void *arg)
|
||||
{
|
||||
struct forest_read_items_data *rid = arg;
|
||||
struct scoutfs_log_item_value _liv = {0,};
|
||||
struct scoutfs_log_item_value *liv = &_liv;
|
||||
|
||||
if (!rid->is_fs) {
|
||||
liv = val;
|
||||
val += sizeof(struct scoutfs_log_item_value);
|
||||
val_len -= sizeof(struct scoutfs_log_item_value);
|
||||
}
|
||||
|
||||
return rid->cb(sb, key, liv, val, val_len, rid->cb_arg);
|
||||
return rid->cb(sb, key, seq, flags, val, val_len, rid->fic, rid->cb_arg);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -246,60 +235,48 @@ static int forest_read_items(struct super_block *sb, struct scoutfs_key *key,
|
||||
* that covers all the blocks. Any keys outside of this range can't be
|
||||
* trusted because we didn't visit all the trees to check their items.
|
||||
*
|
||||
* If we hit stale blocks and retry we can call the callback for
|
||||
* duplicate items. This is harmless because the items are stable while
|
||||
* the caller holds their cluster lock and the caller has to filter out
|
||||
* item versions anyway.
|
||||
* We return -ESTALE if we hit stale blocks to give the caller a chance
|
||||
* to reset their state and retry with a newer version of the btrees.
|
||||
*/
|
||||
int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_lock *lock,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
|
||||
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
{
|
||||
DECLARE_STALE_TRACKING_SUPER_REFS(prev_refs, refs);
|
||||
struct forest_read_items_data rid = {
|
||||
.cb = cb,
|
||||
.cb_arg = arg,
|
||||
};
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_bloom_block *bb;
|
||||
struct forest_bloom_nrs bloom;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_block *bl;
|
||||
struct scoutfs_key ltk;
|
||||
struct scoutfs_key orig_start = *start;
|
||||
struct scoutfs_key orig_end = *end;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
scoutfs_inc_counter(sb, forest_read_items);
|
||||
calc_bloom_nrs(&bloom, &lock->start);
|
||||
calc_bloom_nrs(&bloom, bloom_key);
|
||||
|
||||
roots = lock->roots;
|
||||
retry:
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
trace_scoutfs_forest_using_roots(sb, &roots->fs_root, &roots->logs_root);
|
||||
|
||||
trace_scoutfs_forest_using_roots(sb, &roots.fs_root, &roots.logs_root);
|
||||
refs.fs_ref = roots.fs_root.ref;
|
||||
refs.logs_ref = roots.logs_root.ref;
|
||||
|
||||
*start = lock->start;
|
||||
*end = lock->end;
|
||||
*start = orig_start;
|
||||
*end = orig_end;
|
||||
|
||||
/* start with fs root items */
|
||||
rid.is_fs = true;
|
||||
ret = scoutfs_btree_read_items(sb, &roots.fs_root, key, start, end,
|
||||
rid.fic |= FIC_FS_ROOT;
|
||||
ret = scoutfs_btree_read_items(sb, &roots->fs_root, key, start, end,
|
||||
forest_read_items, &rid);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
rid.is_fs = false;
|
||||
rid.fic &= ~FIC_FS_ROOT;
|
||||
|
||||
scoutfs_key_init_log_trees(<k, 0, 0);
|
||||
for (;; scoutfs_key_inc(<k)) {
|
||||
ret = scoutfs_btree_next(sb, &roots.logs_root, <k, &iref);
|
||||
ret = scoutfs_btree_next(sb, &roots->logs_root, <k, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.val_len == sizeof(lt)) {
|
||||
ltk = *iref.key;
|
||||
@@ -340,30 +317,57 @@ retry:
|
||||
|
||||
scoutfs_inc_counter(sb, forest_bloom_pass);
|
||||
|
||||
if ((le64_to_cpu(lt.flags) & SCOUTFS_LOG_TREES_FINALIZED))
|
||||
rid.fic |= FIC_FINALIZED;
|
||||
|
||||
ret = scoutfs_btree_read_items(sb, <.item_root, key, start,
|
||||
end, forest_read_items, &rid);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
rid.fic &= ~FIC_FINALIZED;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_refs, &refs, sizeof(refs)) == 0) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
prev_refs = refs;
|
||||
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg)
|
||||
{
|
||||
struct scoutfs_net_roots roots;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret == 0)
|
||||
ret = scoutfs_forest_read_items_roots(sb, &roots, key, bloom_key, start, end,
|
||||
cb, arg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the items are deltas then combine the src with the destination
|
||||
* value and store the result in the destination.
|
||||
*
|
||||
* Returns:
|
||||
* -errno: fatal error, no change
|
||||
* 0: not delta items, no change
|
||||
* +ve: SCOUTFS_DELTA_ values indicating when dst and/or src can be dropped
|
||||
*/
|
||||
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
|
||||
void *src, int src_len)
|
||||
{
|
||||
if (key->sk_zone == SCOUTFS_XATTR_TOTL_ZONE)
|
||||
return scoutfs_xattr_combine_totl(dst, dst_len, src, src_len);
|
||||
|
||||
return 0;
|
||||
}
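A hedged sketch of how an item-cache caller might act on the return values documented above; drop_item() is a hypothetical stand-in for however the caller discards an item.

static int merge_delta_values(struct scoutfs_key *key, void *dst, int dst_len,
			      void *src, int src_len)
{
	int ret;

	ret = scoutfs_forest_combine_deltas(key, dst, dst_len, src, src_len);
	if (ret < 0)
		return ret;				/* fatal error, no change */
	if (ret == 0)
		return 0;				/* not delta items, keep both */
	if (ret == SCOUTFS_DELTA_COMBINED) {
		drop_item(src);				/* src folded into dst */
	} else if (ret == SCOUTFS_DELTA_COMBINED_NULL) {
		drop_item(src);				/* combined value is empty */
		drop_item(dst);
	}
	return 0;
}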
|
||||
|
||||
/*
|
||||
* Make sure that the bloom bits for the lock's start key are all set in
|
||||
* the current log's bloom block. We record the nr of our log tree in
|
||||
@@ -433,29 +437,29 @@ out:
|
||||
|
||||
/*
|
||||
* The caller is committing items in the transaction and has found the
|
||||
* greatest item version amongst them. We store it in the log_trees root
|
||||
* greatest item seq amongst them. We store it in the log_trees root
|
||||
* to send to the server.
|
||||
*/
|
||||
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers)
|
||||
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq)
|
||||
{
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
|
||||
finf->our_log.max_item_vers = cpu_to_le64(max_vers);
|
||||
finf->our_log.max_item_seq = cpu_to_le64(max_seq);
|
||||
}
|
||||
|
||||
/*
|
||||
* The server is calling during setup to find the greatest item version
|
||||
* The server is calling during setup to find the greatest item seq
|
||||
* amongst all the log tree roots. They have the authoritative current
|
||||
* super.
|
||||
*
|
||||
* Item versions are only used to compare items in log trees, not in the
|
||||
* main fs tree. All we have to do is find the greatest version amongst
|
||||
* the log_trees so that new locks will have a write_version greater
|
||||
* than all the items in the log_trees.
|
||||
* Item seqs are only used to compare items in log trees, not in the
|
||||
* main fs tree. All we have to do is find the greatest seq amongst the
|
||||
* log_trees so that the core seq will have a greater seq than all the
|
||||
* items in the log_trees.
|
||||
*/
|
||||
int scoutfs_forest_get_max_vers(struct super_block *sb,
|
||||
struct scoutfs_super_block *super,
|
||||
u64 *vers)
|
||||
int scoutfs_forest_get_max_seq(struct super_block *sb,
|
||||
struct scoutfs_super_block *super,
|
||||
u64 *seq)
|
||||
{
|
||||
struct scoutfs_log_trees *lt;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
@@ -463,7 +467,7 @@ int scoutfs_forest_get_max_vers(struct super_block *sb,
|
||||
int ret;
|
||||
|
||||
scoutfs_key_init_log_trees(<k, 0, 0);
|
||||
*vers = 0;
|
||||
*seq = 0;
|
||||
|
||||
for (;; scoutfs_key_inc(<k)) {
|
||||
ret = scoutfs_btree_next(sb, &super->logs_root, <k, &iref);
|
||||
@@ -471,8 +475,7 @@ int scoutfs_forest_get_max_vers(struct super_block *sb,
|
||||
if (iref.val_len == sizeof(struct scoutfs_log_trees)) {
|
||||
ltk = *iref.key;
|
||||
lt = iref.val;
|
||||
*vers = max(*vers,
|
||||
le64_to_cpu(lt->max_item_vers));
|
||||
*seq = max(*seq, le64_to_cpu(lt->max_item_seq));
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
@@ -521,6 +524,59 @@ int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_forest_inc_inode_count(struct super_block *sb)
|
||||
{
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
|
||||
atomic64_inc(&finf->inode_count_delta);
|
||||
}
|
||||
|
||||
void scoutfs_forest_dec_inode_count(struct super_block *sb)
|
||||
{
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
|
||||
atomic64_dec(&finf->inode_count_delta);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the total inode count from the super block and all the
|
||||
* log_btrees it references. ESTALE from read blocks is returned to the
|
||||
* caller who is expected to retry or return hard errors.
|
||||
*/
|
||||
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
|
||||
u64 *inode_count)
|
||||
{
|
||||
struct scoutfs_log_trees *lt;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
*inode_count = le64_to_cpu(super->inode_count);
|
||||
|
||||
scoutfs_key_init_log_trees(&key, 0, 0);
|
||||
for (;;) {
|
||||
ret = scoutfs_btree_next(sb, &super->logs_root, &key, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.val_len == sizeof(*lt)) {
|
||||
key = *iref.key;
|
||||
scoutfs_key_inc(&key);
|
||||
lt = iref.val;
|
||||
*inode_count += le64_to_cpu(lt->inode_count_delta);
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT)
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
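A sketch of the retry contract described above: -ESTALE means the caller should refresh its view of the super block and try again, or give up with a hard error. refresh_super() is a hypothetical stand-in for however the caller re-reads the current super.

static int read_total_inode_count(struct super_block *sb,
				  struct scoutfs_super_block *super, u64 *count)
{
	int tries = 5;	/* arbitrary bound for the sketch */
	int ret;

	do {
		ret = scoutfs_forest_inode_count(sb, super, count);
		if (ret != -ESTALE)
			return ret;
		ret = refresh_super(sb, super);		/* hypothetical */
	} while (ret == 0 && --tries);

	return ret ?: -EIO;
}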
|
||||
|
||||
/*
|
||||
* This is called from transactions as a new transaction opens and is
|
||||
* serialized with all writers.
|
||||
@@ -541,7 +597,7 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
|
||||
memset(&finf->our_log, 0, sizeof(finf->our_log));
|
||||
finf->our_log.item_root = lt->item_root;
|
||||
finf->our_log.bloom_ref = lt->bloom_ref;
|
||||
finf->our_log.max_item_vers = lt->max_item_vers;
|
||||
finf->our_log.max_item_seq = lt->max_item_seq;
|
||||
finf->our_log.rid = lt->rid;
|
||||
finf->our_log.nr = lt->nr;
|
||||
finf->srch_file = lt->srch_file;
|
||||
@@ -549,6 +605,8 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
|
||||
WARN_ON_ONCE(finf->srch_bl); /* committing should have put the block */
|
||||
finf->srch_bl = NULL;
|
||||
|
||||
atomic64_set(&finf->inode_count_delta, le64_to_cpu(lt->inode_count_delta));
|
||||
|
||||
trace_scoutfs_forest_init_our_log(sb, le64_to_cpu(lt->rid),
|
||||
le64_to_cpu(lt->nr),
|
||||
le64_to_cpu(lt->item_root.ref.blkno),
|
||||
@@ -571,15 +629,138 @@ void scoutfs_forest_get_btrees(struct super_block *sb,
|
||||
lt->item_root = finf->our_log.item_root;
|
||||
lt->bloom_ref = finf->our_log.bloom_ref;
|
||||
lt->srch_file = finf->srch_file;
|
||||
lt->max_item_vers = finf->our_log.max_item_vers;
|
||||
lt->max_item_seq = finf->our_log.max_item_seq;
|
||||
|
||||
scoutfs_block_put(sb, finf->srch_bl);
|
||||
finf->srch_bl = NULL;
|
||||
|
||||
lt->inode_count_delta = cpu_to_le64(atomic64_read(&finf->inode_count_delta));
|
||||
|
||||
trace_scoutfs_forest_prepare_commit(sb, <->item_root.ref,
|
||||
<->bloom_ref);
|
||||
}
|
||||
|
||||
#define LOG_MERGE_DELAY_MS (5 * MSEC_PER_SEC)
|
||||
|
||||
/*
|
||||
* Regularly try to get a log merge request from the server. If we get
|
||||
* a request we walk the log_trees items to find input trees and pass
|
||||
* them to btree_merge. All of our work is done in dirty blocks
|
||||
* allocated from available free blocks that the server gave us. If we
|
||||
* hit an error then we drop our dirty blocks without writing them and
|
||||
* send an error flag to the server so they can reclaim our allocators
|
||||
* and ignore the rest of our work.
|
||||
*/
|
||||
static void scoutfs_forest_log_merge_worker(struct work_struct *work)
|
||||
{
|
||||
struct forest_info *finf = container_of(work, struct forest_info,
|
||||
log_merge_dwork.work);
|
||||
struct super_block *sb = finf->sb;
|
||||
struct scoutfs_btree_root_head *rhead = NULL;
|
||||
struct scoutfs_btree_root_head *tmp;
|
||||
struct scoutfs_log_merge_complete comp;
|
||||
struct scoutfs_log_merge_request req;
|
||||
struct scoutfs_log_trees *lt;
|
||||
struct scoutfs_block_writer wri;
|
||||
struct scoutfs_alloc alloc;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key next;
|
||||
struct scoutfs_key key;
|
||||
unsigned long delay;
|
||||
LIST_HEAD(inputs);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_client_get_log_merge(sb, &req);
|
||||
if (ret < 0)
|
||||
goto resched;
|
||||
|
||||
comp.root = req.root;
|
||||
comp.start = req.start;
|
||||
comp.end = req.end;
|
||||
comp.remain = req.end;
|
||||
comp.rid = req.rid;
|
||||
comp.seq = req.seq;
|
||||
comp.flags = 0;
|
||||
|
||||
scoutfs_alloc_init(&alloc, &req.meta_avail, &req.meta_freed);
|
||||
scoutfs_block_writer_init(sb, &wri);
|
||||
|
||||
/* find finalized input log trees within the input seq */
|
||||
for (scoutfs_key_init_log_trees(&key, 0, 0); ; scoutfs_key_inc(&key)) {
|
||||
|
||||
if (!rhead) {
|
||||
rhead = kmalloc(sizeof(*rhead), GFP_NOFS);
|
||||
if (!rhead) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret = scoutfs_btree_next(sb, &req.logs_root, &key, &iref);
|
||||
if (ret == 0) {
|
||||
if (iref.val_len == sizeof(*lt)) {
|
||||
key = *iref.key;
|
||||
lt = iref.val;
|
||||
if (lt->item_root.ref.blkno != 0 &&
|
||||
(le64_to_cpu(lt->flags) & SCOUTFS_LOG_TREES_FINALIZED) &&
|
||||
(le64_to_cpu(lt->finalize_seq) < le64_to_cpu(req.input_seq))) {
|
||||
rhead->root = lt->item_root;
|
||||
list_add_tail(&rhead->head, &inputs);
|
||||
rhead = NULL;
|
||||
}
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
}
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* shouldn't be possible, but it's harmless */
|
||||
if (list_empty(&inputs)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_btree_merge(sb, &alloc, &wri, &req.start, &req.end,
|
||||
&next, &comp.root, &inputs,
|
||||
!!(req.flags & cpu_to_le64(SCOUTFS_LOG_MERGE_REQUEST_SUBTREE)),
|
||||
SCOUTFS_LOG_MERGE_DIRTY_BYTE_LIMIT, 10,
|
||||
(2 * 1024 * 1024));
|
||||
if (ret == -ERANGE) {
|
||||
comp.remain = next;
|
||||
le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_REMAIN);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
scoutfs_alloc_prepare_commit(sb, &alloc, &wri);
|
||||
if (ret == 0)
|
||||
ret = scoutfs_block_writer_write(sb, &wri);
|
||||
scoutfs_block_writer_forget_all(sb, &wri);
|
||||
|
||||
comp.meta_avail = alloc.avail;
|
||||
comp.meta_freed = alloc.freed;
|
||||
if (ret < 0)
|
||||
le64_add_cpu(&comp.flags, SCOUTFS_LOG_MERGE_COMP_ERROR);
|
||||
|
||||
ret = scoutfs_client_commit_log_merge(sb, &comp);
|
||||
|
||||
kfree(rhead);
|
||||
list_for_each_entry_safe(rhead, tmp, &inputs, head)
|
||||
kfree(rhead);
|
||||
|
||||
resched:
|
||||
delay = ret == 0 ? 0 : msecs_to_jiffies(LOG_MERGE_DELAY_MS);
|
||||
queue_delayed_work(finf->workq, &finf->log_merge_dwork, delay);
|
||||
}
|
||||
|
||||
int scoutfs_forest_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -593,10 +774,20 @@ int scoutfs_forest_setup(struct super_block *sb)
|
||||
}
|
||||
|
||||
/* the finf fields will be setup as we open a transaction */
|
||||
finf->sb = sb;
|
||||
mutex_init(&finf->mutex);
|
||||
mutex_init(&finf->srch_mutex);
|
||||
|
||||
INIT_DELAYED_WORK(&finf->log_merge_dwork,
|
||||
scoutfs_forest_log_merge_worker);
|
||||
sbi->forest_info = finf;
|
||||
|
||||
finf->workq = alloc_workqueue("scoutfs_log_merge", WQ_NON_REENTRANT |
|
||||
WQ_UNBOUND | WQ_HIGHPRI, 0);
|
||||
if (!finf->workq) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret)
|
||||
@@ -605,6 +796,24 @@ out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
void scoutfs_forest_start(struct super_block *sb)
|
||||
{
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
|
||||
queue_delayed_work(finf->workq, &finf->log_merge_dwork,
|
||||
msecs_to_jiffies(LOG_MERGE_DELAY_MS));
|
||||
}
|
||||
|
||||
void scoutfs_forest_stop(struct super_block *sb)
|
||||
{
|
||||
DECLARE_FOREST_INFO(sb, finf);
|
||||
|
||||
if (finf && finf->workq) {
|
||||
cancel_delayed_work_sync(&finf->log_merge_dwork);
|
||||
destroy_workqueue(finf->workq);
|
||||
}
|
||||
}
|
||||
|
||||
void scoutfs_forest_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -612,6 +821,7 @@ void scoutfs_forest_destroy(struct super_block *sb)
|
||||
|
||||
if (finf) {
|
||||
scoutfs_block_put(sb, finf->srch_bl);
|
||||
|
||||
kfree(finf);
|
||||
sbi->forest_info = NULL;
|
||||
}
|
||||
|
||||
@@ -4,33 +4,45 @@
|
||||
struct scoutfs_alloc;
|
||||
struct scoutfs_block_writer;
|
||||
struct scoutfs_block;
|
||||
struct scoutfs_lock;
|
||||
|
||||
#include "btree.h"
|
||||
|
||||
/* caller gives an item to the callback */
|
||||
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_log_item_value *liv,
|
||||
void *val, int val_len, void *arg);
|
||||
enum {
|
||||
FIC_FS_ROOT = (1 << 0),
|
||||
FIC_FINALIZED = (1 << 1),
|
||||
};
|
||||
typedef int (*scoutfs_forest_item_cb)(struct super_block *sb, struct scoutfs_key *key, u64 seq,
|
||||
u8 flags, void *val, int val_len, int fic, void *arg);
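A minimal callback sketch matching the typedef above, only to show the shape of the interface: the caller-provided arg records the greatest seq seen for a key and whether that newest item was a deletion. struct newest_item is a hypothetical accumulator, not a type in this header.

struct newest_item {
	u64 seq;
	bool deletion;
	bool found;
};

static int newest_item_cb(struct super_block *sb, struct scoutfs_key *key,
			  u64 seq, u8 flags, void *val, int val_len,
			  int fic, void *arg)
{
	struct newest_item *newest = arg;

	if (!newest->found || seq > newest->seq) {
		newest->seq = seq;
		newest->deletion = !!(flags & SCOUTFS_ITEM_FLAG_DELETION);
		newest->found = true;
	}
	return 0;
}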
|
||||
|
||||
int scoutfs_forest_next_hint(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *next);
|
||||
int scoutfs_forest_read_items(struct super_block *sb,
|
||||
struct scoutfs_lock *lock,
|
||||
struct scoutfs_key *key,
|
||||
struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start,
|
||||
struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg);
|
||||
int scoutfs_forest_read_items_roots(struct super_block *sb, struct scoutfs_net_roots *roots,
|
||||
struct scoutfs_key *key, struct scoutfs_key *bloom_key,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end,
|
||||
scoutfs_forest_item_cb cb, void *arg);
|
||||
int scoutfs_forest_set_bloom_bits(struct super_block *sb,
|
||||
struct scoutfs_lock *lock);
|
||||
void scoutfs_forest_set_max_vers(struct super_block *sb, u64 max_vers);
|
||||
int scoutfs_forest_get_max_vers(struct super_block *sb,
|
||||
struct scoutfs_super_block *super,
|
||||
u64 *vers);
|
||||
void scoutfs_forest_set_max_seq(struct super_block *sb, u64 max_seq);
|
||||
int scoutfs_forest_get_max_seq(struct super_block *sb,
|
||||
struct scoutfs_super_block *super,
|
||||
u64 *seq);
|
||||
int scoutfs_forest_insert_list(struct super_block *sb,
|
||||
struct scoutfs_btree_item_list *lst);
|
||||
int scoutfs_forest_srch_add(struct super_block *sb, u64 hash, u64 ino, u64 id);
|
||||
|
||||
void scoutfs_forest_inc_inode_count(struct super_block *sb);
|
||||
void scoutfs_forest_dec_inode_count(struct super_block *sb);
|
||||
int scoutfs_forest_inode_count(struct super_block *sb, struct scoutfs_super_block *super,
|
||||
u64 *inode_count);
|
||||
|
||||
void scoutfs_forest_init_btrees(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
@@ -38,7 +50,15 @@ void scoutfs_forest_init_btrees(struct super_block *sb,
|
||||
void scoutfs_forest_get_btrees(struct super_block *sb,
|
||||
struct scoutfs_log_trees *lt);
|
||||
|
||||
/* > 0 error codes */
|
||||
#define SCOUTFS_DELTA_COMBINED 1 /* src val was combined, drop src */
|
||||
#define SCOUTFS_DELTA_COMBINED_NULL 2 /* combined val has no data, drop both */
|
||||
int scoutfs_forest_combine_deltas(struct scoutfs_key *key, void *dst, int dst_len,
|
||||
void *src, int src_len);
|
||||
|
||||
int scoutfs_forest_setup(struct super_block *sb);
|
||||
void scoutfs_forest_start(struct super_block *sb);
|
||||
void scoutfs_forest_stop(struct super_block *sb);
|
||||
void scoutfs_forest_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,8 +1,20 @@
|
||||
#ifndef _SCOUTFS_FORMAT_H_
|
||||
#define _SCOUTFS_FORMAT_H_
|
||||
|
||||
#define SCOUTFS_INTEROP_VERSION 0ULL
|
||||
#define SCOUTFS_INTEROP_VERSION_STR __stringify(0)
|
||||
/*
|
||||
* The format version defines the format of structures on devices,
|
||||
* structures that are communicated over the wire, and the protocol
|
||||
* behind the structures.
|
||||
*/
|
||||
#define SCOUTFS_FORMAT_VERSION_MIN 1
|
||||
#define SCOUTFS_FORMAT_VERSION_MIN_STR __stringify(SCOUTFS_FORMAT_VERSION_MIN)
|
||||
#define SCOUTFS_FORMAT_VERSION_MAX 2
|
||||
#define SCOUTFS_FORMAT_VERSION_MAX_STR __stringify(SCOUTFS_FORMAT_VERSION_MAX)
|
||||
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_RETENTION 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_PROJECT_ID 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_QUOTA 2
|
||||
#define SCOUTFS_FORMAT_VERSION_FEAT_INDX_TAG 2
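A hedged sketch of how these constants are meant to be consumed: code that depends on a version-2 feature compares the negotiated format version against the feature's minimum, and a mount only proceeds when the version falls within the supported range. fmt_vers is a stand-in for wherever the running system keeps that negotiated value; these helpers are illustrations, not part of this header.

static inline bool fmt_version_supported(u64 fmt_vers)
{
	return fmt_vers >= SCOUTFS_FORMAT_VERSION_MIN &&
	       fmt_vers <= SCOUTFS_FORMAT_VERSION_MAX;
}

static inline bool fmt_has_quota(u64 fmt_vers)
{
	return fmt_vers >= SCOUTFS_FORMAT_VERSION_FEAT_QUOTA;
}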
|
||||
|
||||
/* statfs(2) f_type */
|
||||
#define SCOUTFS_SUPER_MAGIC 0x554f4353 /* "SCOU" */
|
||||
@@ -168,6 +180,15 @@ struct scoutfs_key {
|
||||
#define sko_rid _sk_first
|
||||
#define sko_ino _sk_second
|
||||
|
||||
/* quota rules */
|
||||
#define skqr_hash _sk_second
|
||||
#define skqr_coll_nr _sk_third
|
||||
|
||||
/* xattr totl */
|
||||
#define skxt_a _sk_first
|
||||
#define skxt_b _sk_second
|
||||
#define skxt_c _sk_third
|
||||
|
||||
/* inode */
|
||||
#define ski_ino _sk_first
|
||||
|
||||
@@ -195,22 +216,16 @@ struct scoutfs_key {
|
||||
#define sklt_rid _sk_first
|
||||
#define sklt_nr _sk_second
|
||||
|
||||
/* lock clients */
|
||||
#define sklc_rid _sk_first
|
||||
|
||||
/* seqs */
|
||||
#define skts_trans_seq _sk_first
|
||||
#define skts_rid _sk_second
|
||||
|
||||
/* mounted clients */
|
||||
#define skmc_rid _sk_first
|
||||
|
||||
/* free extents by blkno */
|
||||
#define skfb_end _sk_second
|
||||
#define skfb_len _sk_third
|
||||
/* free extents by len */
|
||||
#define skfl_neglen _sk_second
|
||||
#define skfl_blkno _sk_third
|
||||
#define skfb_end _sk_first
|
||||
#define skfb_len _sk_second
|
||||
/* free extents by order */
|
||||
#define skfo_revord _sk_first
|
||||
#define skfo_end _sk_second
|
||||
#define skfo_len _sk_third
|
||||
|
||||
struct scoutfs_avl_root {
|
||||
__le16 node;
|
||||
@@ -246,11 +261,15 @@ struct scoutfs_btree_root {
|
||||
struct scoutfs_btree_item {
|
||||
struct scoutfs_avl_node node;
|
||||
struct scoutfs_key key;
|
||||
__le64 seq;
|
||||
__le16 val_off;
|
||||
__le16 val_len;
|
||||
__u8 __pad[4];
|
||||
__u8 flags;
|
||||
__u8 __pad[3];
|
||||
};
|
||||
|
||||
#define SCOUTFS_ITEM_FLAG_DELETION (1 << 0)
|
||||
|
||||
struct scoutfs_btree_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
struct scoutfs_avl_root item_root;
|
||||
@@ -259,7 +278,7 @@ struct scoutfs_btree_block {
|
||||
__le16 mid_free_len;
|
||||
__u8 level;
|
||||
__u8 __pad[7];
|
||||
struct scoutfs_btree_item items[0];
|
||||
struct scoutfs_btree_item items[];
|
||||
/* leaf blocks have a fixed size item offset hash table at the end */
|
||||
};
|
||||
|
||||
@@ -288,9 +307,10 @@ struct scoutfs_alloc_list_head {
|
||||
struct scoutfs_block_ref ref;
|
||||
__le64 total_nr;
|
||||
__le32 first_nr;
|
||||
__u8 __pad[4];
|
||||
__le32 flags;
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* While the main allocator uses extent items in btree blocks, metadata
|
||||
* allocations for a single transaction are recorded in arrays in
|
||||
@@ -307,7 +327,7 @@ struct scoutfs_alloc_list_block {
|
||||
struct scoutfs_block_ref next;
|
||||
__le32 start;
|
||||
__le32 nr;
|
||||
__le64 blknos[0]; /* naturally aligned for sorting */
|
||||
__le64 blknos[]; /* naturally aligned for sorting */
|
||||
};
|
||||
|
||||
#define SCOUTFS_ALLOC_LIST_MAX_BLOCKS \
|
||||
@@ -319,17 +339,25 @@ struct scoutfs_alloc_list_block {
|
||||
*/
|
||||
struct scoutfs_alloc_root {
|
||||
__le64 total_len;
|
||||
__le32 flags;
|
||||
__le32 _pad;
|
||||
struct scoutfs_btree_root root;
|
||||
};
|
||||
|
||||
/* Shared by _alloc_list_head and _alloc_root */
|
||||
#define SCOUTFS_ALLOC_FLAG_LOW (1U << 0)
|
||||
|
||||
/* types of allocators, exposed to alloc_detail ioctl */
|
||||
#define SCOUTFS_ALLOC_OWNER_NONE 0
|
||||
#define SCOUTFS_ALLOC_OWNER_SERVER 1
|
||||
#define SCOUTFS_ALLOC_OWNER_MOUNT 2
|
||||
#define SCOUTFS_ALLOC_OWNER_SRCH 3
|
||||
#define SCOUTFS_ALLOC_OWNER_LOG_MERGE 4
|
||||
|
||||
struct scoutfs_mounted_client_btree_val {
|
||||
union scoutfs_inet_addr addr;
|
||||
__u8 flags;
|
||||
__u8 __pad[7];
|
||||
};
|
||||
|
||||
#define SCOUTFS_MOUNTED_CLIENT_QUORUM (1 << 0)
|
||||
@@ -362,7 +390,7 @@ struct scoutfs_srch_file {
|
||||
|
||||
struct scoutfs_srch_parent {
|
||||
struct scoutfs_block_header hdr;
|
||||
struct scoutfs_block_ref refs[0];
|
||||
struct scoutfs_block_ref refs[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_SRCH_PARENT_REFS \
|
||||
@@ -377,7 +405,7 @@ struct scoutfs_srch_block {
|
||||
struct scoutfs_srch_entry tail;
|
||||
__le32 entry_nr;
|
||||
__le32 entry_bytes;
|
||||
__u8 entries[0];
|
||||
__u8 entries[];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -430,10 +458,20 @@ struct scoutfs_srch_compact {
|
||||
/* client -> server: compaction failed */
|
||||
#define SCOUTFS_SRCH_COMPACT_FLAG_ERROR (1 << 5)
|
||||
|
||||
#define SCOUTFS_DATA_ALLOC_MAX_ZONES 1024
|
||||
#define SCOUTFS_DATA_ALLOC_ZONE_BYTES DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 8)
|
||||
#define SCOUTFS_DATA_ALLOC_ZONE_LE64S DIV_ROUND_UP(SCOUTFS_DATA_ALLOC_MAX_ZONES, 64)
|
||||
|
||||
/*
|
||||
* XXX I imagine we should rename these now that they've evolved to track
|
||||
* all the btrees that clients use during a transaction. It's not just
|
||||
* about item logs, it's about clients making changes to trees.
|
||||
*
|
||||
 * @get_trans_seq, @commit_trans_seq: This pair of sequence numbers
 * determines if a transaction is currently open for the mount that owns
 * the log_trees struct.  get_trans_seq is advanced by the server as the
 * transaction is opened.  The server sets commit_trans_seq equal to
 * get_trans_seq as the transaction is committed.
|
||||
*/
|
||||
struct scoutfs_log_trees {
|
||||
struct scoutfs_alloc_list_head meta_avail;
|
||||
@@ -443,31 +481,27 @@ struct scoutfs_log_trees {
|
||||
struct scoutfs_alloc_root data_avail;
|
||||
struct scoutfs_alloc_root data_freed;
|
||||
struct scoutfs_srch_file srch_file;
|
||||
__le64 max_item_vers;
|
||||
__le64 data_alloc_zone_blocks;
|
||||
__le64 data_alloc_zones[SCOUTFS_DATA_ALLOC_ZONE_LE64S];
|
||||
__le64 inode_count_delta;
|
||||
__le64 get_trans_seq;
|
||||
__le64 commit_trans_seq;
|
||||
__le64 max_item_seq;
|
||||
__le64 finalize_seq;
|
||||
__le64 rid;
|
||||
__le64 nr;
|
||||
__le64 flags;
|
||||
};
|
||||
|
||||
struct scoutfs_log_item_value {
|
||||
__le64 vers;
|
||||
__u8 flags;
|
||||
__u8 __pad[7];
|
||||
__u8 data[0];
|
||||
};
|
||||
#define SCOUTFS_LOG_TREES_FINALIZED (1ULL << 0)
|
||||
|
||||
/*
|
||||
* FS items are limited by the max btree value length with the log item
|
||||
* value header.
|
||||
*/
|
||||
#define SCOUTFS_MAX_VAL_SIZE \
|
||||
(SCOUTFS_BTREE_MAX_VAL_LEN - sizeof(struct scoutfs_log_item_value))
|
||||
|
||||
#define SCOUTFS_LOG_ITEM_FLAG_DELETION (1 << 0)
|
||||
/* FS items are limited by the max btree value length */
|
||||
#define SCOUTFS_MAX_VAL_SIZE SCOUTFS_BTREE_MAX_VAL_LEN
|
||||
|
||||
struct scoutfs_bloom_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 total_set;
|
||||
__le64 bits[0];
|
||||
__le64 bits[];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -484,50 +518,127 @@ struct scoutfs_bloom_block {
|
||||
member_sizeof(struct scoutfs_bloom_block, bits[0]) * 8)
|
||||
#define SCOUTFS_FOREST_BLOOM_FUNC_BITS (SCOUTFS_BLOCK_LG_SHIFT + 3)
|
||||
|
||||
/*
|
||||
* A private server btree item which records the status of a log merge
|
||||
* operation that is in progress.
|
||||
*/
|
||||
struct scoutfs_log_merge_status {
|
||||
struct scoutfs_key next_range_key;
|
||||
__le64 nr_requests;
|
||||
__le64 nr_complete;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* A request is sent to the client and stored in a server btree item to
|
||||
* record resources that would be reclaimed if the client failed. It
|
||||
* has all the inputs needed for the client to perform its portion of a
|
||||
* merge.
|
||||
*/
|
||||
struct scoutfs_log_merge_request {
|
||||
struct scoutfs_alloc_list_head meta_avail;
|
||||
struct scoutfs_alloc_list_head meta_freed;
|
||||
struct scoutfs_btree_root logs_root;
|
||||
struct scoutfs_btree_root root;
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
__le64 input_seq;
|
||||
__le64 rid;
|
||||
__le64 seq;
|
||||
__le64 flags;
|
||||
};
|
||||
|
||||
/* request root is subtree of fs root at parent, restricted merging modifications */
|
||||
#define SCOUTFS_LOG_MERGE_REQUEST_SUBTREE (1ULL << 0)
|
||||
|
||||
/*
|
||||
* The output of a client's merge of log btree items into a subtree
|
||||
* rooted at a parent in the fs_root. The client sends it to the
|
||||
* server, who stores it in a btree item for later splicing/rebalancing.
|
||||
*/
|
||||
struct scoutfs_log_merge_complete {
|
||||
struct scoutfs_alloc_list_head meta_avail;
|
||||
struct scoutfs_alloc_list_head meta_freed;
|
||||
struct scoutfs_btree_root root;
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
struct scoutfs_key remain;
|
||||
__le64 rid;
|
||||
__le64 seq;
|
||||
__le64 flags;
|
||||
};
|
||||
|
||||
/* merge failed, ignore completion and reclaim stored request */
|
||||
#define SCOUTFS_LOG_MERGE_COMP_ERROR (1ULL << 0)
|
||||
/* merge didn't complete range, restart from remain */
|
||||
#define SCOUTFS_LOG_MERGE_COMP_REMAIN (1ULL << 1)
|
||||
|
||||
/*
|
||||
* Range items record the ranges of the fs keyspace that still need to
|
||||
* be merged. They're added as a merge starts, removed as requests are
|
||||
* sent and added back if the request didn't consume its entire range.
|
||||
*/
|
||||
struct scoutfs_log_merge_range {
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
};
|
||||
|
||||
struct scoutfs_log_merge_freeing {
|
||||
struct scoutfs_btree_root root;
|
||||
struct scoutfs_key key;
|
||||
__le64 seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* Keys are first sorted by major key zones.
|
||||
*/
|
||||
#define SCOUTFS_INODE_INDEX_ZONE 1
|
||||
#define SCOUTFS_RID_ZONE 2
|
||||
#define SCOUTFS_FS_ZONE 3
|
||||
#define SCOUTFS_LOCK_ZONE 4
|
||||
#define SCOUTFS_INODE_INDEX_ZONE 4
|
||||
#define SCOUTFS_ORPHAN_ZONE 8
|
||||
#define SCOUTFS_QUOTA_ZONE 10
|
||||
#define SCOUTFS_XATTR_TOTL_ZONE 12
|
||||
#define SCOUTFS_XATTR_INDX_ZONE 14
|
||||
#define SCOUTFS_FS_ZONE 16
|
||||
#define SCOUTFS_LOCK_ZONE 20
|
||||
/* Items only stored in server btrees */
|
||||
#define SCOUTFS_LOG_TREES_ZONE 6
|
||||
#define SCOUTFS_LOCK_CLIENTS_ZONE 7
|
||||
#define SCOUTFS_TRANS_SEQ_ZONE 8
|
||||
#define SCOUTFS_MOUNTED_CLIENT_ZONE 9
|
||||
#define SCOUTFS_SRCH_ZONE 10
|
||||
#define SCOUTFS_FREE_EXTENT_ZONE 11
|
||||
#define SCOUTFS_LOG_TREES_ZONE 24
|
||||
#define SCOUTFS_MOUNTED_CLIENT_ZONE 28
|
||||
#define SCOUTFS_SRCH_ZONE 32
|
||||
#define SCOUTFS_FREE_EXTENT_BLKNO_ZONE 36
|
||||
#define SCOUTFS_FREE_EXTENT_ORDER_ZONE 40
|
||||
/* Items only stored in log merge server btrees */
|
||||
#define SCOUTFS_LOG_MERGE_STATUS_ZONE 44
|
||||
#define SCOUTFS_LOG_MERGE_RANGE_ZONE 48
|
||||
#define SCOUTFS_LOG_MERGE_REQUEST_ZONE 52
|
||||
#define SCOUTFS_LOG_MERGE_COMPLETE_ZONE 56
|
||||
#define SCOUTFS_LOG_MERGE_FREEING_ZONE 60
|
||||
|
||||
/* inode index zone */
|
||||
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 1
|
||||
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 2
|
||||
#define SCOUTFS_INODE_INDEX_NR 3 /* don't forget to update */
|
||||
#define SCOUTFS_INODE_INDEX_META_SEQ_TYPE 4
|
||||
#define SCOUTFS_INODE_INDEX_DATA_SEQ_TYPE 8
|
||||
|
||||
/* rid zone (also used in server alloc btree) */
|
||||
#define SCOUTFS_ORPHAN_TYPE 1
|
||||
/* orphan zone, redundant type used for clarity */
|
||||
#define SCOUTFS_ORPHAN_TYPE 4
|
||||
|
||||
/* quota zone */
|
||||
#define SCOUTFS_QUOTA_RULE_TYPE 4
|
||||
|
||||
/* fs zone */
|
||||
#define SCOUTFS_INODE_TYPE 1
|
||||
#define SCOUTFS_XATTR_TYPE 2
|
||||
#define SCOUTFS_DIRENT_TYPE 3
|
||||
#define SCOUTFS_READDIR_TYPE 4
|
||||
#define SCOUTFS_LINK_BACKREF_TYPE 5
|
||||
#define SCOUTFS_SYMLINK_TYPE 6
|
||||
#define SCOUTFS_DATA_EXTENT_TYPE 7
|
||||
#define SCOUTFS_INODE_TYPE 4
|
||||
#define SCOUTFS_XATTR_TYPE 8
|
||||
#define SCOUTFS_DIRENT_TYPE 12
|
||||
#define SCOUTFS_READDIR_TYPE 16
|
||||
#define SCOUTFS_LINK_BACKREF_TYPE 20
|
||||
#define SCOUTFS_SYMLINK_TYPE 24
|
||||
#define SCOUTFS_DATA_EXTENT_TYPE 28
|
||||
|
||||
/* lock zone, only ever found in lock ranges, never in persistent items */
|
||||
#define SCOUTFS_RENAME_TYPE 1
|
||||
#define SCOUTFS_RENAME_TYPE 4
|
||||
|
||||
/* srch zone, only in server btrees */
|
||||
#define SCOUTFS_SRCH_LOG_TYPE 1
|
||||
#define SCOUTFS_SRCH_BLOCKS_TYPE 2
|
||||
#define SCOUTFS_SRCH_PENDING_TYPE 3
|
||||
#define SCOUTFS_SRCH_BUSY_TYPE 4
|
||||
|
||||
/* free extents in allocator btrees in client and server, by blkno or len */
|
||||
#define SCOUTFS_FREE_EXTENT_BLKNO_TYPE 1
|
||||
#define SCOUTFS_FREE_EXTENT_LEN_TYPE 2
|
||||
#define SCOUTFS_SRCH_LOG_TYPE 4
|
||||
#define SCOUTFS_SRCH_BLOCKS_TYPE 8
|
||||
#define SCOUTFS_SRCH_PENDING_TYPE 12
|
||||
#define SCOUTFS_SRCH_BUSY_TYPE 16
|
||||
|
||||
/* file data extents have start and len in key */
|
||||
struct scoutfs_data_extent_val {
|
||||
@@ -549,9 +660,48 @@ struct scoutfs_xattr {
|
||||
__le16 val_len;
|
||||
__u8 name_len;
|
||||
__u8 __pad[5];
|
||||
__u8 name[0];
|
||||
__u8 name[];
|
||||
};
|
||||
|
||||
/*
|
||||
* .totl. xattrs are mapped to items. The dotted u64s in the xattr name
|
||||
* map to the item key. The item value total is the sum of all the
|
||||
* xattr values. The item value count records the number of xattrs
|
||||
* contributing to the total and is used when combining logged items to
|
||||
* determine if totals are being created or destroyed.
|
||||
*/
|
||||
struct scoutfs_xattr_totl_val {
|
||||
__le64 total;
|
||||
__le64 count;
|
||||
};
|
||||
|
||||
#define SQ_RF_TOTL_COUNT (1 << 0)
|
||||
#define SQ_RF__UNKNOWN (~((1 << 1) - 1))
|
||||
|
||||
#define SQ_NS_LITERAL 0
|
||||
#define SQ_NS_PROJ 1
|
||||
#define SQ_NS_UID 2
|
||||
#define SQ_NS_GID 3
|
||||
#define SQ_NS__NR 4
|
||||
#define SQ_NS__NR_SELECT (SQ_NS__NR - 1) /* !literal */
|
||||
|
||||
#define SQ_NF_SELECT (1 << 0)
|
||||
#define SQ_NF__UNKNOWN (~((1 << 1) - 1))
|
||||
|
||||
#define SQ_OP_INODE 0
|
||||
#define SQ_OP_DATA 1
|
||||
#define SQ_OP__NR 2
|
||||
|
||||
struct scoutfs_quota_rule_val {
|
||||
__le64 name_val[3];
|
||||
__le64 limit;
|
||||
__u8 prio;
|
||||
__u8 op;
|
||||
__u8 rule_flags;
|
||||
__u8 name_source[3];
|
||||
__u8 name_flags[3];
|
||||
__u8 _pad[7];
|
||||
};
|
||||
|
||||
/* XXX does this exist upstream somewhere? */
|
||||
#define member_sizeof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
|
||||
@@ -575,16 +725,25 @@ struct scoutfs_xattr {
|
||||
#define SCOUTFS_QUORUM_ELECT_VAR_MS 100
|
||||
|
||||
/*
|
||||
* Once a leader is elected they send out heartbeats at regular
|
||||
* intervals to force members to wait the much longer heartbeat timeout.
|
||||
* Once heartbeat timeout expires without receiving a heartbeat they'll
|
||||
* switch over the performing elections.
|
||||
* Once a leader is elected they send heartbeat messages to all quorum
|
||||
* members at regular intervals to force members to wait the much longer
|
||||
* heartbeat timeout. Once the heartbeat timeout expires without
|
||||
* receiving a heartbeat message a member will start an election.
|
||||
*
|
||||
* These determine how long it could take members to notice that a
|
||||
* leader has gone silent and start to elect a new leader.
|
||||
* leader has gone silent and start to elect a new leader. The
|
||||
* heartbeat timeout can be changed at run time by options.
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_HB_IVAL_MS 100
|
||||
#define SCOUTFS_QUORUM_HB_TIMEO_MS (5 * MSEC_PER_SEC)
|
||||
#define SCOUTFS_QUORUM_MIN_HB_TIMEO_MS (2 * MSEC_PER_SEC)
|
||||
#define SCOUTFS_QUORUM_DEF_HB_TIMEO_MS (10 * MSEC_PER_SEC)
|
||||
#define SCOUTFS_QUORUM_MAX_HB_TIMEO_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
/*
|
||||
* A newly elected leader will give fencing some time before giving up and
|
||||
* shutting down.
|
||||
*/
|
||||
#define SCOUTFS_QUORUM_FENCE_TO_MS (15 * MSEC_PER_SEC)
|
||||
|
||||
struct scoutfs_quorum_message {
|
||||
__le64 fsid;
|
||||
@@ -617,35 +776,76 @@ struct scoutfs_quorum_config {
|
||||
} slots[SCOUTFS_QUORUM_MAX_SLOTS];
|
||||
};
|
||||
|
||||
struct scoutfs_quorum_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 term;
|
||||
__le64 random_write_mark;
|
||||
__le64 flags;
|
||||
struct scoutfs_quorum_block_event {
|
||||
__le64 rid;
|
||||
struct scoutfs_timespec ts;
|
||||
} write, update_term, set_leader, clear_leader, fenced;
|
||||
enum {
|
||||
SCOUTFS_QUORUM_EVENT_BEGIN, /* quorum service starting up */
|
||||
SCOUTFS_QUORUM_EVENT_TERM, /* updated persistent term */
|
||||
SCOUTFS_QUORUM_EVENT_ELECT, /* won election */
|
||||
SCOUTFS_QUORUM_EVENT_FENCE, /* server fenced others */
|
||||
SCOUTFS_QUORUM_EVENT_STOP, /* server stopped */
|
||||
SCOUTFS_QUORUM_EVENT_END, /* quorum service shutting down */
|
||||
SCOUTFS_QUORUM_EVENT_NR,
|
||||
};
|
||||
|
||||
#define SCOUTFS_QUORUM_BLOCK_LEADER (1 << 0)
|
||||
struct scoutfs_quorum_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 write_nr;
|
||||
struct scoutfs_quorum_block_event {
|
||||
__le64 write_nr;
|
||||
__le64 rid;
|
||||
__le64 term;
|
||||
struct scoutfs_timespec ts;
|
||||
} events[SCOUTFS_QUORUM_EVENT_NR];
|
||||
};
|
||||
|
||||
/*
|
||||
* Tunable options that apply to the entire system. They can be set in
|
||||
* mkfs or in sysfs files which send an rpc to the server to make the
|
||||
* change. The super version defines the options that exist.
|
||||
*
|
||||
* @set_bits: bits for each 64bit starting offset after set_bits
|
||||
* indicate which logical option is set.
|
||||
*
|
||||
* @data_alloc_zone_blocks: if set, the data device is logically divided
|
||||
* into contiguous zones of this many blocks. Data allocation will try
|
||||
* and isolate allocated extents for each mount to their own zone. The
|
||||
* zone size must be larger than the data alloc high water mark and
|
||||
* large enough such that the number of zones is kept within its static
|
||||
* limit.
|
||||
*/
|
||||
struct scoutfs_volume_options {
|
||||
__le64 set_bits;
|
||||
__le64 data_alloc_zone_blocks;
|
||||
__le64 __future_expansion[63];
|
||||
};
|
||||
|
||||
#define scoutfs_volopt_nr(field) \
|
||||
((offsetof(struct scoutfs_volume_options, field) - \
|
||||
(offsetof(struct scoutfs_volume_options, set_bits) + \
|
||||
member_sizeof(struct scoutfs_volume_options, set_bits))) / sizeof(__le64))
|
||||
#define scoutfs_volopt_bit(field) \
|
||||
(1ULL << scoutfs_volopt_nr(field))
|
||||
|
||||
#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR \
|
||||
scoutfs_volopt_nr(data_alloc_zone_blocks)
|
||||
#define SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT \
|
||||
scoutfs_volopt_bit(data_alloc_zone_blocks)
|
||||
|
||||
#define SCOUTFS_VOLOPT_EXPANSION_BITS \
|
||||
(~(scoutfs_volopt_bit(__future_expansion) - 1))
|
||||
|
||||
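For reference, a minimal sketch of how the volopt helper macros above resolve for the one defined option: scoutfs_volopt_nr() turns a field's offset after set_bits into a logical option number, and scoutfs_volopt_bit() turns that into a bit in set_bits. The helper name below is illustrative, not part of the diff.

/* illustrative only: data_alloc_zone_blocks is the first field after set_bits, so nr 0, bit (1ULL << 0) */
static void example_set_zone_opt(struct scoutfs_volume_options *opts, u64 zone_blocks)
{
	opts->set_bits |= cpu_to_le64(SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_BIT);
	opts->data_alloc_zone_blocks = cpu_to_le64(zone_blocks);
}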
#define SCOUTFS_FLAG_IS_META_BDEV 0x01
|
||||
|
||||
struct scoutfs_super_block {
|
||||
struct scoutfs_block_header hdr;
|
||||
__le64 id;
|
||||
__le64 version;
|
||||
__le64 fmt_vers;
|
||||
__le64 flags;
|
||||
__u8 uuid[SCOUTFS_UUID_BYTES];
|
||||
__le64 seq;
|
||||
__le64 next_ino;
|
||||
__le64 next_trans_seq;
|
||||
__le64 inode_count;
|
||||
__le64 total_meta_blocks; /* both static and dynamic */
|
||||
__le64 first_meta_blkno; /* first dynamically allocated */
|
||||
__le64 last_meta_blkno;
|
||||
__le64 total_data_blocks;
|
||||
__le64 first_data_blkno;
|
||||
__le64 last_data_blkno;
|
||||
struct scoutfs_quorum_config qconf;
|
||||
struct scoutfs_alloc_root meta_alloc[2];
|
||||
struct scoutfs_alloc_root data_alloc;
|
||||
@@ -653,10 +853,10 @@ struct scoutfs_super_block {
|
||||
struct scoutfs_alloc_list_head server_meta_freed[2];
|
||||
struct scoutfs_btree_root fs_root;
|
||||
struct scoutfs_btree_root logs_root;
|
||||
struct scoutfs_btree_root lock_clients;
|
||||
struct scoutfs_btree_root trans_seqs;
|
||||
struct scoutfs_btree_root log_merge;
|
||||
struct scoutfs_btree_root mounted_clients;
|
||||
struct scoutfs_btree_root srch_root;
|
||||
struct scoutfs_volume_options volopt;
|
||||
};
|
||||
|
||||
#define SCOUTFS_ROOT_INO 1
|
||||
@@ -680,13 +880,6 @@ struct scoutfs_super_block {
|
||||
*
|
||||
* @offline_blocks: The number of fixed 4k blocks that could be made
|
||||
* online by staging.
|
||||
*
|
||||
* XXX
|
||||
* - otime?
|
||||
* - compat flags?
|
||||
* - version?
|
||||
* - generation?
|
||||
* - be more careful with rdev?
|
||||
*/
|
||||
struct scoutfs_inode {
|
||||
__le64 size;
|
||||
@@ -697,6 +890,7 @@ struct scoutfs_inode {
|
||||
__le64 offline_blocks;
|
||||
__le64 next_readdir_pos;
|
||||
__le64 next_xattr_id;
|
||||
__le64 version;
|
||||
__le32 nlink;
|
||||
__le32 uid;
|
||||
__le32 gid;
|
||||
@@ -706,9 +900,39 @@ struct scoutfs_inode {
|
||||
struct scoutfs_timespec atime;
|
||||
struct scoutfs_timespec ctime;
|
||||
struct scoutfs_timespec mtime;
|
||||
struct scoutfs_timespec crtime;
|
||||
__le64 proj;
|
||||
};
|
||||
|
||||
#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
|
||||
#define SCOUTFS_INODE_FMT_V1_BYTES offsetof(struct scoutfs_inode, proj)
|
||||
|
||||
/*
|
||||
* There are so few versions that we don't mind doing this work inline
|
||||
* so that both utils and kernel can share these. Mounting has already
|
||||
* checked that the format version is within the supported min and max,
|
||||
* so these functions only deal with size variance within that band.
|
||||
*/
|
||||
/* Returns the native written inode size for the given format version, 0 for bad version */
|
||||
static inline int scoutfs_inode_vers_bytes(__u64 fmt_vers)
|
||||
{
|
||||
if (fmt_vers == 1)
|
||||
return SCOUTFS_INODE_FMT_V1_BYTES;
|
||||
else
|
||||
return sizeof(struct scoutfs_inode);
|
||||
}
|
||||
/*
|
||||
* Returns true if bytes is a valid inode size to read from the given
|
||||
* version. The given version must be greater than the version that
|
||||
* introduced the size.
|
||||
*/
|
||||
static inline int scoutfs_inode_valid_vers_bytes(__u64 fmt_vers, int bytes)
|
||||
{
|
||||
return (bytes == sizeof(struct scoutfs_inode) && fmt_vers == SCOUTFS_FORMAT_VERSION_MAX) ||
|
||||
(bytes == SCOUTFS_INODE_FMT_V1_BYTES);
|
||||
}
|
||||
|
||||
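As a rough illustration of how the version helpers above might be used together (the function below is hypothetical, not from the diff): a writer sizes the inode item it emits from the mounted format version, and a bad version is reported as an error.

/* hypothetical sketch: size an inode item from the super's format version */
static int example_inode_item_len(struct scoutfs_super_block *super)
{
	int bytes = scoutfs_inode_vers_bytes(le64_to_cpu(super->fmt_vers));

	if (bytes == 0)
		return -EINVAL;	/* unknown format version */
	return bytes;
}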
#define SCOUTFS_INO_FLAG_TRUNCATE 0x1
|
||||
#define SCOUTFS_INO_FLAG_RETENTION 0x2
|
||||
|
||||
#define SCOUTFS_ROOT_INO 1
|
||||
|
||||
@@ -729,7 +953,7 @@ struct scoutfs_dirent {
|
||||
__le64 pos;
|
||||
__u8 type;
|
||||
__u8 __pad[7];
|
||||
__u8 name[0];
|
||||
__u8 name[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NAME_LEN 255
|
||||
@@ -757,6 +981,7 @@ enum scoutfs_dentry_type {
|
||||
#define SCOUTFS_XATTR_MAX_NAME_LEN 255
|
||||
#define SCOUTFS_XATTR_MAX_VAL_LEN 65535
|
||||
#define SCOUTFS_XATTR_MAX_PART_SIZE SCOUTFS_MAX_VAL_SIZE
|
||||
#define SCOUTFS_XATTR_MAX_TOTL_U64 23 /* octal U64_MAX */
|
||||
|
||||
#define SCOUTFS_XATTR_NR_PARTS(name_len, val_len) \
|
||||
DIV_ROUND_UP(sizeof(struct scoutfs_xattr) + name_len + val_len, \
|
||||
@@ -787,7 +1012,7 @@ enum scoutfs_dentry_type {
|
||||
*/
|
||||
struct scoutfs_net_greeting {
|
||||
__le64 fsid;
|
||||
__le64 version;
|
||||
__le64 fmt_vers;
|
||||
__le64 server_term;
|
||||
__le64 rid;
|
||||
__le64 flags;
|
||||
@@ -818,7 +1043,6 @@ struct scoutfs_net_greeting {
|
||||
* response messages.
|
||||
*/
|
||||
struct scoutfs_net_header {
|
||||
__le64 clock_sync_id;
|
||||
__le64 seq;
|
||||
__le64 recv_seq;
|
||||
__le64 id;
|
||||
@@ -827,7 +1051,7 @@ struct scoutfs_net_header {
|
||||
__u8 flags;
|
||||
__u8 error;
|
||||
__u8 __pad[3];
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NET_FLAG_RESPONSE (1 << 0)
|
||||
@@ -838,13 +1062,21 @@ enum scoutfs_net_cmd {
|
||||
SCOUTFS_NET_CMD_ALLOC_INODES,
|
||||
SCOUTFS_NET_CMD_GET_LOG_TREES,
|
||||
SCOUTFS_NET_CMD_COMMIT_LOG_TREES,
|
||||
SCOUTFS_NET_CMD_SYNC_LOG_TREES,
|
||||
SCOUTFS_NET_CMD_GET_ROOTS,
|
||||
SCOUTFS_NET_CMD_ADVANCE_SEQ,
|
||||
SCOUTFS_NET_CMD_GET_LAST_SEQ,
|
||||
SCOUTFS_NET_CMD_LOCK,
|
||||
SCOUTFS_NET_CMD_LOCK_RECOVER,
|
||||
SCOUTFS_NET_CMD_SRCH_GET_COMPACT,
|
||||
SCOUTFS_NET_CMD_SRCH_COMMIT_COMPACT,
|
||||
SCOUTFS_NET_CMD_GET_LOG_MERGE,
|
||||
SCOUTFS_NET_CMD_COMMIT_LOG_MERGE,
|
||||
SCOUTFS_NET_CMD_OPEN_INO_MAP,
|
||||
SCOUTFS_NET_CMD_GET_VOLOPT,
|
||||
SCOUTFS_NET_CMD_SET_VOLOPT,
|
||||
SCOUTFS_NET_CMD_CLEAR_VOLOPT,
|
||||
SCOUTFS_NET_CMD_RESIZE_DEVICES,
|
||||
SCOUTFS_NET_CMD_STATFS,
|
||||
SCOUTFS_NET_CMD_FAREWELL,
|
||||
SCOUTFS_NET_CMD_UNKNOWN,
|
||||
};
|
||||
@@ -887,23 +1119,32 @@ struct scoutfs_net_roots {
|
||||
struct scoutfs_btree_root srch_root;
|
||||
};
|
||||
|
||||
struct scoutfs_net_resize_devices {
|
||||
__le64 new_total_meta_blocks;
|
||||
__le64 new_total_data_blocks;
|
||||
};
|
||||
|
||||
struct scoutfs_net_statfs {
|
||||
__u8 uuid[SCOUTFS_UUID_BYTES];
|
||||
__le64 free_meta_blocks;
|
||||
__le64 total_meta_blocks;
|
||||
__le64 free_data_blocks;
|
||||
__le64 total_data_blocks;
|
||||
__le64 inode_count;
|
||||
};
|
||||
|
||||
struct scoutfs_net_lock {
|
||||
struct scoutfs_key key;
|
||||
__le64 write_version;
|
||||
__le64 write_seq;
|
||||
__u8 old_mode;
|
||||
__u8 new_mode;
|
||||
__u8 __pad[6];
|
||||
};
|
||||
|
||||
struct scoutfs_net_lock_grant_response {
|
||||
struct scoutfs_net_lock nl;
|
||||
struct scoutfs_net_roots roots;
|
||||
};
|
||||
|
||||
struct scoutfs_net_lock_recover {
|
||||
__le16 nr;
|
||||
__u8 __pad[6];
|
||||
struct scoutfs_net_lock locks[0];
|
||||
struct scoutfs_net_lock locks[];
|
||||
};
|
||||
|
||||
#define SCOUTFS_NET_LOCK_MAX_RECOVER_NR \
|
||||
@@ -918,6 +1159,7 @@ enum scoutfs_lock_trace {
|
||||
SLT_INVALIDATE,
|
||||
SLT_REQUEST,
|
||||
SLT_RESPONSE,
|
||||
SLT_NR,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -970,4 +1212,42 @@ enum scoutfs_corruption_sources {
|
||||
|
||||
#define SC_NR_LONGS DIV_ROUND_UP(SC_NR_SOURCES, BITS_PER_LONG)
|
||||
|
||||
#define SCOUTFS_OPEN_INO_MAP_SHIFT 10
|
||||
#define SCOUTFS_OPEN_INO_MAP_BITS (1 << SCOUTFS_OPEN_INO_MAP_SHIFT)
|
||||
#define SCOUTFS_OPEN_INO_MAP_MASK (SCOUTFS_OPEN_INO_MAP_BITS - 1)
|
||||
#define SCOUTFS_OPEN_INO_MAP_LE64S (SCOUTFS_OPEN_INO_MAP_BITS / 64)
|
||||
|
||||
/*
|
||||
* The request and response conversation is as follows:
|
||||
*
|
||||
* client[init] -> server:
|
||||
* group_nr = G
|
||||
* req_id = 0 (I)
|
||||
* server -> client[*]
|
||||
* group_nr = G
|
||||
* req_id = R
|
||||
* client[*] -> server
|
||||
* group_nr = G (I)
|
||||
* req_id = R
|
||||
* bits
|
||||
* server -> client[init]
|
||||
* group_nr = G (I)
|
||||
* req_id = R (I)
|
||||
* bits
|
||||
*
|
||||
* Many of the fields in individual messages are ignored ("I") because
|
||||
* the net id or the omap req_id can be used to identify the
|
||||
* conversation. We always include them on the wire to make inspected
|
||||
* messages easier to follow.
|
||||
*/
|
||||
struct scoutfs_open_ino_map_args {
|
||||
__le64 group_nr;
|
||||
__le64 req_id;
|
||||
};
|
||||
|
||||
struct scoutfs_open_ino_map {
|
||||
struct scoutfs_open_ino_map_args args;
|
||||
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
|
||||
};
|
||||
|
||||
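A small sketch of the group/bit arithmetic implied by the map constants above (the helper name and the exact ino-to-group mapping are illustrative assumptions): an inode number selects a group of SCOUTFS_OPEN_INO_MAP_BITS inodes and a bit within that group's bitmap.

/* illustrative only: test an ino's bit in a received open_ino_map */
static bool example_ino_is_open(struct scoutfs_open_ino_map *map, u64 ino)
{
	u64 group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
	u32 bit = ino & SCOUTFS_OPEN_INO_MAP_MASK;

	if (group_nr != le64_to_cpu(map->args.group_nr))
		return false;
	return (le64_to_cpu(map->bits[bit / 64]) >> (bit % 64)) & 1;
}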
#endif
|
||||
|
||||
kmod/src/inode.c (1266 lines changed): diff suppressed because it is too large.

kmod/src/inode.h:
@@ -9,6 +9,8 @@
|
||||
|
||||
struct scoutfs_lock;
|
||||
|
||||
#define SCOUTFS_INODE_NR_INDICES 2
|
||||
|
||||
struct scoutfs_inode_info {
|
||||
/* read or initialized for each inode instance */
|
||||
u64 ino;
|
||||
@@ -19,7 +21,9 @@ struct scoutfs_inode_info {
|
||||
u64 data_version;
|
||||
u64 online_blocks;
|
||||
u64 offline_blocks;
|
||||
u64 proj;
|
||||
u32 flags;
|
||||
struct kc_timespec crtime;
|
||||
|
||||
/*
|
||||
* Protects per-inode extent items, most particularly readers
|
||||
@@ -37,23 +41,32 @@ struct scoutfs_inode_info {
|
||||
*/
|
||||
struct mutex item_mutex;
|
||||
bool have_item;
|
||||
u64 item_majors[SCOUTFS_INODE_INDEX_NR];
|
||||
u32 item_minors[SCOUTFS_INODE_INDEX_NR];
|
||||
u64 item_majors[SCOUTFS_INODE_NR_INDICES];
|
||||
u32 item_minors[SCOUTFS_INODE_NR_INDICES];
|
||||
|
||||
/* updated at on each new lock acquisition */
|
||||
atomic64_t last_refreshed;
|
||||
|
||||
/* initialized once for slab object */
|
||||
seqcount_t seqcount;
|
||||
seqlock_t seqlock;
|
||||
bool staging; /* holder of i_mutex is staging */
|
||||
struct scoutfs_per_task pt_data_lock;
|
||||
struct scoutfs_data_waitq data_waitq;
|
||||
struct rw_semaphore xattr_rwsem;
|
||||
struct rb_node writeback_node;
|
||||
struct list_head writeback_entry;
|
||||
|
||||
struct scoutfs_lock_coverage ino_lock_cov;
|
||||
|
||||
struct list_head iput_head;
|
||||
unsigned long iput_count;
|
||||
unsigned long iput_flags;
|
||||
|
||||
struct inode inode;
|
||||
};
|
||||
|
||||
/* try to prune dcache aliases with queued iput */
|
||||
#define SI_IPUT_FLAG_PRUNE (1 << 0)
|
||||
|
||||
static inline struct scoutfs_inode_info *SCOUTFS_I(struct inode *inode)
|
||||
{
|
||||
return container_of(inode, struct scoutfs_inode_info, inode);
|
||||
@@ -68,11 +81,15 @@ struct inode *scoutfs_alloc_inode(struct super_block *sb);
|
||||
void scoutfs_destroy_inode(struct inode *inode);
|
||||
int scoutfs_drop_inode(struct inode *inode);
|
||||
void scoutfs_evict_inode(struct inode *inode);
|
||||
int scoutfs_orphan_inode(struct inode *inode);
|
||||
void scoutfs_inode_queue_iput(struct inode *inode, unsigned long flags);
|
||||
|
||||
struct inode *scoutfs_iget(struct super_block *sb, u64 ino);
|
||||
struct inode *scoutfs_ilookup(struct super_block *sb, u64 ino);
|
||||
#define SCOUTFS_IGF_LINKED (1 << 0) /* enoent if nlink == 0 */
|
||||
struct inode *scoutfs_iget(struct super_block *sb, u64 ino, int lkf, int igf);
|
||||
struct inode *scoutfs_ilookup_nowait(struct super_block *sb, u64 ino);
|
||||
struct inode *scoutfs_ilookup_nowait_nonewfree(struct super_block *sb, u64 ino);
|
||||
|
||||
|
||||
void scoutfs_inode_init_key(struct scoutfs_key *key, u64 ino);
|
||||
void scoutfs_inode_init_index_key(struct scoutfs_key *key, u8 type, u64 major,
|
||||
u32 minor, u64 ino);
|
||||
int scoutfs_inode_index_start(struct super_block *sb, u64 *seq);
|
||||
@@ -82,9 +99,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
|
||||
struct list_head *list, u64 ino,
|
||||
umode_t mode);
|
||||
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
|
||||
struct list_head *list, u64 seq);
|
||||
struct list_head *list, u64 seq, bool allocing);
|
||||
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
|
||||
bool set_data_seq);
|
||||
bool set_data_seq, bool allocing);
|
||||
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);
|
||||
|
||||
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);
|
||||
@@ -92,9 +109,8 @@ void scoutfs_update_inode_item(struct inode *inode, struct scoutfs_lock *lock,
|
||||
struct list_head *ind_locks);
|
||||
|
||||
int scoutfs_alloc_ino(struct super_block *sb, bool is_dir, u64 *ino_ret);
|
||||
struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
|
||||
umode_t mode, dev_t rdev, u64 ino,
|
||||
struct scoutfs_lock *lock);
|
||||
int scoutfs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t rdev,
|
||||
u64 ino, struct scoutfs_lock *lock, struct inode **inode_ret);
|
||||
|
||||
void scoutfs_inode_set_meta_seq(struct inode *inode);
|
||||
void scoutfs_inode_set_data_seq(struct inode *inode);
|
||||
@@ -105,25 +121,43 @@ u64 scoutfs_inode_meta_seq(struct inode *inode);
|
||||
u64 scoutfs_inode_data_seq(struct inode *inode);
|
||||
u64 scoutfs_inode_data_version(struct inode *inode);
|
||||
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off);
|
||||
u32 scoutfs_inode_get_flags(struct inode *inode);
|
||||
void scoutfs_inode_set_flags(struct inode *inode, u32 and, u32 or);
|
||||
u64 scoutfs_inode_get_proj(struct inode *inode);
|
||||
void scoutfs_inode_set_proj(struct inode *inode, u64 proj);
|
||||
|
||||
int scoutfs_complete_truncate(struct inode *inode, struct scoutfs_lock *lock);
|
||||
|
||||
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
|
||||
int flags);
|
||||
int scoutfs_inode_check_retention(struct inode *inode);
|
||||
|
||||
int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock);
|
||||
#ifdef KC_LINUX_HAVE_RHEL_IOPS_WRAPPER
|
||||
int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
||||
struct kstat *stat);
|
||||
int scoutfs_setattr(struct dentry *dentry, struct iattr *attr);
|
||||
#else
|
||||
int scoutfs_getattr(KC_VFS_NS_DEF
|
||||
const struct path *path, struct kstat *stat,
|
||||
u32 request_mask, unsigned int query_flags);
|
||||
#endif
|
||||
int scoutfs_setattr(KC_VFS_NS_DEF
|
||||
struct dentry *dentry, struct iattr *attr);
|
||||
|
||||
int scoutfs_scan_orphans(struct super_block *sb);
|
||||
int scoutfs_inode_orphan_create(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
|
||||
struct scoutfs_lock *primary);
|
||||
int scoutfs_inode_orphan_delete(struct super_block *sb, u64 ino, struct scoutfs_lock *lock,
|
||||
struct scoutfs_lock *primary);
|
||||
void scoutfs_inode_schedule_orphan_dwork(struct super_block *sb);
|
||||
|
||||
void scoutfs_inode_queue_writeback(struct inode *inode);
|
||||
int scoutfs_inode_walk_writeback(struct super_block *sb, bool write);
|
||||
|
||||
u64 scoutfs_last_ino(struct super_block *sb);
|
||||
|
||||
void scoutfs_inode_exit(void);
|
||||
int scoutfs_inode_init(void);
|
||||
|
||||
int scoutfs_inode_setup(struct super_block *sb);
|
||||
void scoutfs_inode_start(struct super_block *sb);
|
||||
void scoutfs_inode_orphan_stop(struct super_block *sb);
|
||||
void scoutfs_inode_flush_iput(struct super_block *sb);
|
||||
void scoutfs_inode_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
|
||||
kmod/src/ioctl.c (1003 lines changed): diff suppressed because it is too large.

kmod/src/ioctl.h (479 lines changed):
@@ -13,8 +13,7 @@
|
||||
* This is enforced by pahole scripting in external build environments.
|
||||
*/
|
||||
|
||||
/* XXX I have no idea how these are chosen. */
|
||||
#define SCOUTFS_IOCTL_MAGIC 's'
|
||||
#define SCOUTFS_IOCTL_MAGIC 0xE8 /* arbitrarily chosen hole in ioctl-number.rst */
|
||||
|
||||
/*
|
||||
* Packed scoutfs keys rarely cross the ioctl boundary so we have a
|
||||
@@ -88,7 +87,7 @@ enum scoutfs_ino_walk_seq_type {
|
||||
* Adds entries to the user's buffer for each inode that is found in the
|
||||
* given index between the first and last positions.
|
||||
*/
|
||||
#define SCOUTFS_IOC_WALK_INODES _IOR(SCOUTFS_IOCTL_MAGIC, 1, \
|
||||
#define SCOUTFS_IOC_WALK_INODES _IOW(SCOUTFS_IOCTL_MAGIC, 1, \
|
||||
struct scoutfs_ioctl_walk_inodes)
|
||||
|
||||
/*
|
||||
@@ -163,11 +162,11 @@ struct scoutfs_ioctl_ino_path_result {
|
||||
__u64 dir_pos;
|
||||
__u16 path_bytes;
|
||||
__u8 _pad[6];
|
||||
__u8 path[0];
|
||||
__u8 path[];
|
||||
};
|
||||
|
||||
/* Get a single path from the root to the given inode number */
|
||||
#define SCOUTFS_IOC_INO_PATH _IOR(SCOUTFS_IOCTL_MAGIC, 2, \
|
||||
#define SCOUTFS_IOC_INO_PATH _IOW(SCOUTFS_IOCTL_MAGIC, 2, \
|
||||
struct scoutfs_ioctl_ino_path)
|
||||
|
||||
/*
|
||||
@@ -215,23 +214,16 @@ struct scoutfs_ioctl_stage {
|
||||
/*
|
||||
* Give the user inode fields that are not otherwise visible. statx()
|
||||
* isn't always available and xattrs are relatively expensive.
|
||||
*
|
||||
* @valid_bytes stores the number of bytes that are valid in the
|
||||
* structure. The caller sets this to the size of the struct that they
|
||||
 * understand.  The kernel then fills and copies back the min of the
 * sizes that the kernel and the user caller understand.  The user can tell if a
|
||||
* field is set if all of its bytes are within the valid_bytes that the
|
||||
* kernel set on return.
|
||||
*
|
||||
* New fields are only added to the end of the struct.
|
||||
*/
|
||||
struct scoutfs_ioctl_stat_more {
|
||||
__u64 valid_bytes;
|
||||
__u64 meta_seq;
|
||||
__u64 data_seq;
|
||||
__u64 data_version;
|
||||
__u64 online_blocks;
|
||||
__u64 offline_blocks;
|
||||
__u64 crtime_sec;
|
||||
__u32 crtime_nsec;
|
||||
__u8 _pad[4];
|
||||
};
|
||||
|
||||
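A brief userspace sketch of the valid_bytes handshake described above (includes and the helper name are illustrative; SCOUTFS_IOC_STAT_MORE is defined just below): the caller advertises the struct size it knows, then checks the returned valid_bytes to see which trailing fields the kernel actually filled.

/* illustrative only: fetch stat_more and check that crtime was returned */
static int example_stat_more(int fd, struct scoutfs_ioctl_stat_more *stm)
{
	int ret;

	memset(stm, 0, sizeof(*stm));
	stm->valid_bytes = sizeof(*stm);

	ret = ioctl(fd, SCOUTFS_IOC_STAT_MORE, stm);
	if (ret < 0)
		return -errno;

	if (stm->valid_bytes < offsetof(struct scoutfs_ioctl_stat_more, crtime_nsec) +
			       sizeof(stm->crtime_nsec))
		return -EOPNOTSUPP;	/* kernel too old to report crtime */
	return 0;
}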
#define SCOUTFS_IOC_STAT_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 5, \
|
||||
@@ -259,15 +251,16 @@ struct scoutfs_ioctl_data_waiting {
|
||||
__u8 _pad[6];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U8_MAX << 0)
|
||||
#define SCOUTFS_IOC_DATA_WAITING_FLAGS_UNKNOWN (U64_MAX << 0)
|
||||
|
||||
#define SCOUTFS_IOC_DATA_WAITING _IOR(SCOUTFS_IOCTL_MAGIC, 6, \
|
||||
#define SCOUTFS_IOC_DATA_WAITING _IOW(SCOUTFS_IOCTL_MAGIC, 6, \
|
||||
struct scoutfs_ioctl_data_waiting)
|
||||
|
||||
/*
|
||||
* If i_size is set then data_version must be non-zero. If the offline
|
||||
 * flag is set then i_size must be set and an offline extent will be
|
||||
* created from offset 0 to i_size.
|
||||
* created from offset 0 to i_size. The time fields are always applied
|
||||
* to the inode.
|
||||
*/
|
||||
struct scoutfs_ioctl_setattr_more {
|
||||
__u64 data_version;
|
||||
@@ -275,11 +268,12 @@ struct scoutfs_ioctl_setattr_more {
|
||||
__u64 flags;
|
||||
__u64 ctime_sec;
|
||||
__u32 ctime_nsec;
|
||||
__u8 _pad[4];
|
||||
__u32 crtime_nsec;
|
||||
__u64 crtime_sec;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_OFFLINE (1 << 0)
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U8_MAX << 1)
|
||||
#define SCOUTFS_IOC_SETATTR_MORE_UNKNOWN (U64_MAX << 1)
|
||||
|
||||
#define SCOUTFS_IOC_SETATTR_MORE _IOW(SCOUTFS_IOCTL_MAGIC, 7, \
|
||||
struct scoutfs_ioctl_setattr_more)
|
||||
@@ -291,8 +285,8 @@ struct scoutfs_ioctl_listxattr_hidden {
|
||||
__u32 hash_pos;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOR(SCOUTFS_IOCTL_MAGIC, 8, \
|
||||
struct scoutfs_ioctl_listxattr_hidden)
|
||||
#define SCOUTFS_IOC_LISTXATTR_HIDDEN _IOWR(SCOUTFS_IOCTL_MAGIC, 8, \
|
||||
struct scoutfs_ioctl_listxattr_hidden)
|
||||
|
||||
/*
|
||||
* Return the inode numbers of inodes which might contain the given
|
||||
@@ -345,32 +339,23 @@ struct scoutfs_ioctl_search_xattrs {
|
||||
/* set in output_flags if returned inodes reached last_ino */
|
||||
#define SCOUTFS_SEARCH_XATTRS_OFLAG_END (1ULL << 0)
|
||||
|
||||
#define SCOUTFS_IOC_SEARCH_XATTRS _IOR(SCOUTFS_IOCTL_MAGIC, 9, \
|
||||
struct scoutfs_ioctl_search_xattrs)
|
||||
#define SCOUTFS_IOC_SEARCH_XATTRS _IOW(SCOUTFS_IOCTL_MAGIC, 9, \
|
||||
struct scoutfs_ioctl_search_xattrs)
|
||||
|
||||
/*
|
||||
* Give the user information about the filesystem.
|
||||
*
|
||||
* @valid_bytes stores the number of bytes that are valid in the
|
||||
* structure. The caller sets this to the size of the struct that they
|
||||
 * understand.  The kernel then fills and copies back the min of the
 * sizes that the kernel and the user caller understand.  The user can tell if a
|
||||
* field is set if all of its bytes are within the valid_bytes that the
|
||||
* kernel set on return.
|
||||
*
|
||||
* @committed_seq: All seqs up to and including this seq have been
|
||||
* committed. Can be compared with meta_seq and data_seq from inodes in
|
||||
* stat_more to discover if changes have been committed to disk.
|
||||
*
|
||||
* New fields are only added to the end of the struct.
|
||||
*/
|
||||
struct scoutfs_ioctl_statfs_more {
|
||||
__u64 valid_bytes;
|
||||
__u64 fsid;
|
||||
__u64 rid;
|
||||
__u64 committed_seq;
|
||||
__u64 total_meta_blocks;
|
||||
__u64 total_data_blocks;
|
||||
__u64 reserved_meta_blocks;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_STATFS_MORE _IOR(SCOUTFS_IOCTL_MAGIC, 10, \
|
||||
@@ -391,7 +376,7 @@ struct scoutfs_ioctl_data_wait_err {
|
||||
__s64 err;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_DATA_WAIT_ERR _IOR(SCOUTFS_IOCTL_MAGIC, 11, \
|
||||
#define SCOUTFS_IOC_DATA_WAIT_ERR _IOW(SCOUTFS_IOCTL_MAGIC, 11, \
|
||||
struct scoutfs_ioctl_data_wait_err)
|
||||
|
||||
|
||||
@@ -410,7 +395,7 @@ struct scoutfs_ioctl_alloc_detail_entry {
|
||||
__u8 __pad[6];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_ALLOC_DETAIL _IOR(SCOUTFS_IOCTL_MAGIC, 12, \
|
||||
#define SCOUTFS_IOC_ALLOC_DETAIL _IOW(SCOUTFS_IOCTL_MAGIC, 12, \
|
||||
struct scoutfs_ioctl_alloc_detail)
|
||||
|
||||
/*
|
||||
@@ -418,12 +403,13 @@ struct scoutfs_ioctl_alloc_detail_entry {
|
||||
* on the same file system.
|
||||
*
|
||||
* from_fd specifies the source file and the ioctl is called on the
|
||||
* destination file. Both files must have write access. from_off
|
||||
* specifies the byte offset in the source, to_off is the byte offset in
|
||||
* the destination, and len is the number of bytes in the region to
|
||||
* move. All of the offsets and lengths must be in multiples of 4KB,
|
||||
* except in the case where the from_off + len ends at the i_size of the
|
||||
* source file.
|
||||
* destination file. Both files must have write access. from_off specifies
|
||||
* the byte offset in the source, to_off is the byte offset in the
|
||||
* destination, and len is the number of bytes in the region to move. All of
|
||||
* the offsets and lengths must be in multiples of 4KB, except in the case
|
||||
* where the from_off + len ends at the i_size of the source
|
||||
* file. data_version is only used when STAGE flag is set (see below). flags
|
||||
* field is currently only used to optionally specify STAGE behavior.
|
||||
*
|
||||
* This interface only moves extents which are block granular, it does
|
||||
* not perform RMW of sub-block byte extents and it does not overwrite
|
||||
@@ -435,33 +421,426 @@ struct scoutfs_ioctl_alloc_detail_entry {
|
||||
* i_size. The i_size update will maintain final partial blocks in the
|
||||
* source.
|
||||
*
|
||||
* It will return an error if either of the files have offline extents.
|
||||
* It will return 0 when all of the extents in the source region have
|
||||
* been moved to the destination. Moving extents updates the ctime,
|
||||
* mtime, meta_seq, data_seq, and data_version fields of both the source
|
||||
* and destination inodes. If an error is returned then partial
|
||||
* If STAGE flag is not set, it will return an error if either of the files
|
||||
* have offline extents. It will return 0 when all of the extents in the
|
||||
* source region have been moved to the destination. Moving extents updates
|
||||
* the ctime, mtime, meta_seq, data_seq, and data_version fields of both the
|
||||
* source and destination inodes. If an error is returned then partial
|
||||
* progress may have been made and inode fields may have been updated.
|
||||
*
|
||||
* If STAGE flag is set, as above except destination range must be in an
|
||||
* offline extent. Fields are updated only for source inode.
|
||||
*
|
||||
* Errors specific to this interface include:
|
||||
*
|
||||
* EINVAL: from_off, len, or to_off aren't a multiple of 4KB; the source
|
||||
* and destination files are the same inode; either the source or
|
||||
* destination is not a regular file; the destination file has
|
||||
* an existing overlapping extent.
|
||||
* an existing overlapping extent (if STAGE flag not set); the
|
||||
* destination range is not in an offline extent (if STAGE set).
|
||||
* EOVERFLOW: either from_off + len or to_off + len exceeded 64bits.
|
||||
* EBADF: from_fd isn't a valid open file descriptor.
|
||||
* EXDEV: the source and destination files are in different filesystems.
|
||||
* EISDIR: either the source or destination is a directory.
|
||||
* ENODATA: either the source or destination file have offline extents.
|
||||
* ENODATA: either the source or destination file have offline extents and
|
||||
* STAGE flag is not set.
|
||||
* ESTALE: data_version does not match destination data_version.
|
||||
*/
|
||||
#define SCOUTFS_IOC_MB_STAGE (1 << 0)
|
||||
#define SCOUTFS_IOC_MB_UNKNOWN (U64_MAX << 1)
|
||||
|
||||
struct scoutfs_ioctl_move_blocks {
|
||||
__u64 from_fd;
|
||||
__u64 from_off;
|
||||
__u64 len;
|
||||
__u64 to_off;
|
||||
__u64 data_version;
|
||||
__u64 flags;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_MOVE_BLOCKS _IOR(SCOUTFS_IOCTL_MAGIC, 13, \
|
||||
#define SCOUTFS_IOC_MOVE_BLOCKS _IOW(SCOUTFS_IOCTL_MAGIC, 13, \
|
||||
struct scoutfs_ioctl_move_blocks)
|
||||
|
||||
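A hedged userspace sketch of the move_blocks ioctl described above (fd handling and error reporting abbreviated; the function name is illustrative): move a 4KB-aligned region from one file into another, optionally staging into an offline region of the destination.

/* illustrative only: move len bytes from from_fd at from_off into to_fd at to_off */
static int example_move_blocks(int to_fd, int from_fd, __u64 from_off, __u64 to_off,
			       __u64 len, bool stage, __u64 data_version)
{
	struct scoutfs_ioctl_move_blocks mb = {
		.from_fd = from_fd,
		.from_off = from_off,
		.len = len,
		.to_off = to_off,
		.data_version = stage ? data_version : 0,
		.flags = stage ? SCOUTFS_IOC_MB_STAGE : 0,
	};

	/* offsets and len must be 4KB multiples unless from_off + len == source i_size */
	return ioctl(to_fd, SCOUTFS_IOC_MOVE_BLOCKS, &mb) ? -errno : 0;
}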
struct scoutfs_ioctl_resize_devices {
|
||||
__u64 new_total_meta_blocks;
|
||||
__u64 new_total_data_blocks;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_RESIZE_DEVICES \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 14, struct scoutfs_ioctl_resize_devices)
|
||||
|
||||
#define SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR 3
|
||||
|
||||
/*
|
||||
* Copy global totals of .totl. xattr value payloads to the user. This
|
||||
* only sees xattrs which have been committed and this doesn't force
|
||||
* commits of dirty data throughout the system. This can be out of sync
|
||||
* by the amount of xattrs that can be dirty in open transactions that
|
||||
* are being built throughout the system.
|
||||
*
|
||||
* pos_name: The array name of the first total that can be returned.
|
||||
* The name is derived from the key of the xattrs that contribute to the
|
||||
* total. For xattrs with a .totl.1.2.3 key, the pos_name[] should be
|
||||
* {1, 2, 3}.
|
||||
*
|
||||
* totals_ptr: An aligned pointer to a buffer that will be filled with
|
||||
* an array of scoutfs_ioctl_xattr_total structs for each total copied.
|
||||
*
|
||||
* totals_bytes: The size of the buffer in bytes. There must be room
|
||||
* for at least one struct element so that returning 0 can promise that
|
||||
* there were no more totals to copy after the pos_name.
|
||||
*
|
||||
* The number of copied elements is returned and 0 is returned if there
|
||||
* were no more totals to copy after the pos_name.
|
||||
*
|
||||
* In addition to the usual errnos (EIO, EINVAL, EPERM, EFAULT) this
|
||||
* adds:
|
||||
*
|
||||
* EINVAL: The totals_ buffer was not aligned or was not large enough
|
||||
* for a single struct entry.
|
||||
*/
|
||||
struct scoutfs_ioctl_read_xattr_totals {
|
||||
__u64 pos_name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
|
||||
__u64 totals_ptr;
|
||||
__u64 totals_bytes;
|
||||
};
|
||||
|
||||
/*
|
||||
* An individual total that is given to userspace. The total is the
|
||||
* sum of all the values in the xattr payloads matching the name. The
|
||||
* count is the number of xattrs, not number of files, contributing to
|
||||
* the total.
|
||||
*/
|
||||
struct scoutfs_ioctl_xattr_total {
|
||||
__u64 name[SCOUTFS_IOCTL_XATTR_TOTAL_NAME_NR];
|
||||
__u64 total;
|
||||
__u64 count;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_READ_XATTR_TOTALS \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 15, struct scoutfs_ioctl_read_xattr_totals)
|
||||
|
||||
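A small userspace sketch of a single call to the totals ioctl above (buffer sizing and the helper name are illustrative). The returned count is how many entries were copied; 0 means nothing remained at or after pos_name.

/* illustrative only: read up to 64 totals starting at name {0, 0, 0} and print them */
static int example_read_totals(int fd)
{
	struct scoutfs_ioctl_xattr_total totals[64];
	struct scoutfs_ioctl_read_xattr_totals rxt = {
		.pos_name = {0, 0, 0},
		.totals_ptr = (unsigned long)totals,
		.totals_bytes = sizeof(totals),
	};
	int nr, i;

	nr = ioctl(fd, SCOUTFS_IOC_READ_XATTR_TOTALS, &rxt);
	for (i = 0; i < nr; i++)
		printf("%llu.%llu.%llu: total %llu count %llu\n",
		       totals[i].name[0], totals[i].name[1], totals[i].name[2],
		       totals[i].total, totals[i].count);
	return nr < 0 ? -errno : nr;
}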
/*
|
||||
* This fills the caller's inos array with inode numbers that are in use
|
||||
* after the start ino, within an internal inode group.
|
||||
*
|
||||
* This only makes a promise about the state of the inode numbers within
|
||||
* the first and last numbers returned by one call. At one time, all of
|
||||
* those inodes were still allocated. They could have changed before
|
||||
* the call returned. And any numbers outside of the first and last
|
||||
* (or single) are undefined.
|
||||
*
|
||||
* This doesn't iterate over all allocated inodes, it only probes a
|
||||
* single group that the start inode is within. This interface was
|
||||
* first introduced to support tests that needed to find out about a
|
||||
* specific inode, while having some other similarly niche uses. It is
|
||||
* unsuitable for a consistent iteration over all the inode numbers in
|
||||
* use.
|
||||
*
|
||||
* This test of inode items doesn't serialize with the inode lifetime
|
||||
* mechanism. It only tells you the numbers of inodes that were once
|
||||
* active in the system and haven't yet been fully deleted. The inode
|
||||
* numbers returned could have been in the process of being deleted and
|
||||
* were already unreachable even before the call started.
|
||||
*
|
||||
* @start_ino: the first inode number that could be returned
|
||||
* @inos_ptr: pointer to an aligned array of 64bit inode numbers
|
||||
* @inos_bytes: the number of bytes available in the inos_ptr array
|
||||
*
|
||||
* Returns errors or the count of inode numbers returned, quite possibly
|
||||
* including 0.
|
||||
*/
|
||||
struct scoutfs_ioctl_get_allocated_inos {
|
||||
__u64 start_ino;
|
||||
__u64 inos_ptr;
|
||||
__u64 inos_bytes;
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOC_GET_ALLOCATED_INOS \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 16, struct scoutfs_ioctl_get_allocated_inos)
|
||||
|
||||
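A short sketch of the probe described above (illustrative helper; it assumes that an allocated start_ino would be returned as the first entry): ask for in-use inode numbers in start_ino's group and report whether start_ino itself was among them.

/* illustrative only: 1 if ino was allocated at the time of the call, 0 if not, -errno on error */
static int example_ino_allocated(int fd, __u64 ino)
{
	__u64 inos[32];
	struct scoutfs_ioctl_get_allocated_inos gai = {
		.start_ino = ino,
		.inos_ptr = (unsigned long)inos,
		.inos_bytes = sizeof(inos),
	};
	int nr = ioctl(fd, SCOUTFS_IOC_GET_ALLOCATED_INOS, &gai);

	if (nr < 0)
		return -errno;
	return nr > 0 && inos[0] == ino;
}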
/*
|
||||
* Get directory entries that refer to a specific inode.
|
||||
*
|
||||
* @ino: The target ino that we're finding referring entries to.
|
||||
* Constant across all the calls that make up an iteration over all the
|
||||
* inode's entries.
|
||||
*
|
||||
* @dir_ino: The inode number of a directory containing the entry to our
|
||||
* inode to search from. If this parent directory contains no more
|
||||
* entries to our inode then we'll search through other parent directory
|
||||
* inodes in inode order.
|
||||
*
|
||||
* @dir_pos: The position in the dir_ino parent directory of the entry
|
||||
* to our inode to search from. If there is no entry at this position
|
||||
* then we'll search through other entry positions in increasing order.
|
||||
* If we exhaust the parent directory then we'll search through
|
||||
* additional parent directories in inode order.
|
||||
*
|
||||
* @entries_ptr: A pointer to the buffer where found entries will be
|
||||
* stored. The pointer must be aligned to 16 bytes.
|
||||
*
|
||||
* @entries_bytes: The size of the buffer that will contain entries.
|
||||
*
|
||||
* To start iterating set the desired target ino, dir_ino to 0, dir_pos
|
||||
* to 0, and set result_ptr and _bytes to a sufficiently large buffer.
|
||||
* Each entry struct that's stored in the buffer adds some overhead so a
|
||||
* large multiple of the largest possible name is a reasonable choice.
|
||||
* (A few multiples of PATH_MAX perhaps.)
|
||||
*
|
||||
* Each call returns the total number of entries that were stored in the
|
||||
* entries buffer. Zero is returned when the search was successful and
|
||||
* no referring entries were found. The entries can be iterated over by
|
||||
* advancing each starting struct offset by the total number of bytes in
|
||||
* each entry. If the _LAST flag is set on an entry then there were no
|
||||
* more entries referring to the inode at the time of the call and
|
||||
* iteration can be stopped.
|
||||
*
|
||||
* To resume iteration set the next call's starting dir_ino and dir_pos
|
||||
* to one past the last entry seen. Increment the last entry's dir_pos,
|
||||
* and if it wrapped to 0, increment its dir_ino.
|
||||
*
|
||||
* This does not check that the caller has permission to read the
|
||||
* entries found in each containing directory. It requires
|
||||
* CAP_DAC_READ_SEARCH which bypasses path traversal permissions
|
||||
* checking.
|
||||
*
|
||||
* Entries returned by a single call can reflect any combination of
|
||||
* racing creation and removal of entries. Each entry existed at the
|
||||
* time it was read though it may have changed in the time it took to
|
||||
* return from the call. The set of entries returned may no longer
|
||||
* reflect the current set of entries and may not have existed at the
|
||||
* same time.
|
||||
*
|
||||
* This has no knowledge of the life cycle of the inode. It can return
|
||||
* 0 when there are no referring entries because either the target inode
|
||||
* doesn't exist, it is in the process of being deleted, or because it
|
||||
* is still open while being unlinked.
|
||||
*
|
||||
* On success this returns the number of entries filled in the buffer.
|
||||
* A return of 0 indicates that no entries referred to the inode.
|
||||
*
|
||||
* EINVAL is returned when there is a problem with the buffer. Either
|
||||
* it was not aligned or it was not large enough for the first entry.
|
||||
*
|
||||
* Many other errnos indicate hard failure to find the next entry.
|
||||
*/
|
||||
struct scoutfs_ioctl_get_referring_entries {
|
||||
__u64 ino;
|
||||
__u64 dir_ino;
|
||||
__u64 dir_pos;
|
||||
__u64 entries_ptr;
|
||||
__u64 entries_bytes;
|
||||
};
|
||||
|
||||
/*
|
||||
* @dir_ino: The inode of the directory containing the entry.
|
||||
*
|
||||
* @dir_pos: The readdir f_pos position of the entry within the
|
||||
* directory.
|
||||
*
|
||||
* @ino: The inode number of the target of the entry.
|
||||
*
|
||||
* @flags: Flags associated with this entry.
|
||||
*
|
||||
* @d_type: Inode type as specified with DT_ enum values in readdir(3).
|
||||
*
|
||||
* @entry_bytes: The total bytes taken by the entry in memory, including
|
||||
* the name and any alignment padding. The start of a following entry
|
||||
* will be found after this number of bytes.
|
||||
*
|
||||
* @name_len: The number of bytes in the name not including the trailing
|
||||
* null, ala strlen(3).
|
||||
*
|
||||
* @name: The null terminated name of the referring entry. In the
|
||||
* struct definition this array is sized to naturally align the struct.
|
||||
 * That number of padded bytes is not necessarily found in the buffer
 * returned by _get_referring_entries.
|
||||
*/
|
||||
struct scoutfs_ioctl_dirent {
|
||||
__u64 dir_ino;
|
||||
__u64 dir_pos;
|
||||
__u64 ino;
|
||||
__u16 entry_bytes;
|
||||
__u8 flags;
|
||||
__u8 d_type;
|
||||
__u8 name_len;
|
||||
__u8 name[3];
|
||||
};
|
||||
|
||||
#define SCOUTFS_IOCTL_DIRENT_FLAG_LAST (1 << 0)
|
||||
|
||||
#define SCOUTFS_IOC_GET_REFERRING_ENTRIES \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 17, struct scoutfs_ioctl_get_referring_entries)
|
||||
|
||||
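A userspace sketch of the iteration protocol laid out above (the buffer size and helper name are illustrative): walk the packed entries by entry_bytes, resume one past the last entry seen, and stop when the _LAST flag is seen or an empty result is returned.

/* illustrative only: print every entry that refers to ino */
static int example_print_referring(int fd, __u64 ino)
{
	char buf[64 * 1024] __attribute__((aligned(16)));
	struct scoutfs_ioctl_get_referring_entries gre = { .ino = ino };
	struct scoutfs_ioctl_dirent *dent;
	int nr, i;

	for (;;) {
		gre.entries_ptr = (unsigned long)buf;
		gre.entries_bytes = sizeof(buf);

		nr = ioctl(fd, SCOUTFS_IOC_GET_REFERRING_ENTRIES, &gre);
		if (nr <= 0)
			return nr < 0 ? -errno : 0;

		dent = (void *)buf;
		for (i = 0; i < nr; i++) {
			printf("dir %llu pos %llu name %s\n",
			       dent->dir_ino, dent->dir_pos, dent->name);
			if (dent->flags & SCOUTFS_IOCTL_DIRENT_FLAG_LAST)
				return 0;
			/* resume one past this entry */
			gre.dir_ino = dent->dir_ino;
			gre.dir_pos = dent->dir_pos + 1;
			if (gre.dir_pos == 0)
				gre.dir_ino++;
			dent = (void *)((char *)dent + dent->entry_bytes);
		}
	}
}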
struct scoutfs_ioctl_inode_attr_x {
|
||||
__u64 x_mask;
|
||||
__u64 x_flags;
|
||||
__u64 meta_seq;
|
||||
__u64 data_seq;
|
||||
__u64 data_version;
|
||||
__u64 online_blocks;
|
||||
__u64 offline_blocks;
|
||||
__u64 ctime_sec;
|
||||
__u32 ctime_nsec;
|
||||
__u32 crtime_nsec;
|
||||
__u64 crtime_sec;
|
||||
__u64 size;
|
||||
__u64 bits;
|
||||
__u64 project_id;
|
||||
};
|
||||
|
||||
/*
|
||||
* Behavioral flags set in the x_flags field. These flags don't
|
||||
* necessarily correspond to specific attributes, but instead change the
|
||||
* behaviour of a _get_ or _set_ operation.
|
||||
*
|
||||
* @SCOUTFS_IOC_IAX_F_SIZE_OFFLINE: When setting i_size, also create
|
||||
* extents which are marked offline for the region of the file from
|
||||
* offset 0 to the new set size. This can only be set when setting the
|
||||
* size and has no effect if setting the size fails.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_F_SIZE_OFFLINE (1ULL << 0)
|
||||
#define SCOUTFS_IOC_IAX_F__UNKNOWN (U64_MAX << 1)
|
||||
|
||||
/*
|
||||
* Single-bit values stored in the @bits field. These indicate whether
|
||||
* the bit is set, or not. The main _IAX_ bits set in the mask indicate
|
||||
* whether this value bit is populated by _get or stored by _set.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_B_RETENTION (1ULL << 0)
|
||||
|
||||
/*
|
||||
* x_mask bits which indicate which attributes of the inode to populate
|
||||
* on return for _get or to set on the inode for _set. Each mask bit
|
||||
* corresponds to the matching named field in the attr_x struct passed
|
||||
* to the _get_ and _set_ calls.
|
||||
*
|
||||
* Each field can have different permissions or other attribute
|
||||
* requirements which can cause calls to fail. If _set_ fails then no
|
||||
* other attribute changes will have been made by the same call.
|
||||
*
|
||||
* @SCOUTFS_IOC_IAX_RETENTION: Mark a file for retention. When marked,
|
||||
* no modification can be made to the file other than changing extended
|
||||
* attributes outside the "user." prefix and clearing the retention
|
||||
* mark. This can only be set on regular files and requires root (the
|
||||
* CAP_SYS_ADMIN capability). Other attributes can be set with a
|
||||
* set_attr_x call on a retention inode as long as that call also
|
||||
* successfully clears the retention mark.
|
||||
*/
|
||||
#define SCOUTFS_IOC_IAX_META_SEQ (1ULL << 0)
|
||||
#define SCOUTFS_IOC_IAX_DATA_SEQ (1ULL << 1)
|
||||
#define SCOUTFS_IOC_IAX_DATA_VERSION (1ULL << 2)
|
||||
#define SCOUTFS_IOC_IAX_ONLINE_BLOCKS (1ULL << 3)
|
||||
#define SCOUTFS_IOC_IAX_OFFLINE_BLOCKS (1ULL << 4)
|
||||
#define SCOUTFS_IOC_IAX_CTIME (1ULL << 5)
|
||||
#define SCOUTFS_IOC_IAX_CRTIME (1ULL << 6)
|
||||
#define SCOUTFS_IOC_IAX_SIZE (1ULL << 7)
|
||||
#define SCOUTFS_IOC_IAX_RETENTION (1ULL << 8)
|
||||
#define SCOUTFS_IOC_IAX_PROJECT_ID (1ULL << 9)
|
||||
|
||||
/* single bit attributes that are packed in the bits field as _B_ */
|
||||
#define SCOUTFS_IOC_IAX__BITS (SCOUTFS_IOC_IAX_RETENTION)
|
||||
/* inverse of all the bits we understand */
|
||||
#define SCOUTFS_IOC_IAX__UNKNOWN (U64_MAX << 10)
|
||||
|
||||
#define SCOUTFS_IOC_GET_ATTR_X \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 18, struct scoutfs_ioctl_inode_attr_x)
|
||||
|
||||
#define SCOUTFS_IOC_SET_ATTR_X \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 19, struct scoutfs_ioctl_inode_attr_x)
|
||||
|
||||
/*
|
||||
* (These fields are documented in the order that they're displayed by
|
||||
* the scoutfs cli utility which matches the sort order of the rules.)
|
||||
*
|
||||
* @prio: The priority of the rule. Rules are sorted by their fields
|
||||
* with prio at the highest magnitude. When multiple rules match the
|
||||
* rule with the highest sort order is enforced. The priority field
|
||||
* lets rules override the default field sort order.
|
||||
*
|
||||
* @name_val[3]: The three 64bit values that make up the name of the
|
||||
* totl xattr whose total will be checked against the rule's limit to
|
||||
* see if the quota rule has been exceeded. The behavior of the values
|
||||
* can be changed by their corresponding name_source and name_flags.
|
||||
*
|
||||
* @name_source[3]: The SQ_NS_ enums that control where the value comes
|
||||
* from. _LITERAL uses the value from name_val. Inode attribute
|
||||
* sources (_PROJ, _UID, _GID) are taken from the inode of the operation
|
||||
* that is being checked against the rule.
|
||||
*
|
||||
* @name_flags[3]: The SQ_NF_ enums that alter the name values. _SELECT
|
||||
* makes the rule only match if the inode attribute of the operation
|
||||
* matches the attribute value stored in name_val. This lets rules
|
||||
* match a specific value of an attribute rather than mapping all
|
||||
* attribute values of to totl names.
|
||||
*
|
||||
* @op: The SQ_OP_ enums which specify the operation that can't exceed
|
||||
* the rule's limit. _INODE checks inode creation and the inode
|
||||
* attributes are taken from the inode that would be created. _DATA
|
||||
* checks file data block allocation and the inode fields come from the
|
||||
* inode that is allocating the blocks.
|
||||
*
|
||||
* @limit: The 64bit value that is checked against the totl value
|
||||
* described by the rule. If the totl value is greater than or equal to
|
||||
* this value of the matching rule then the operation will return
|
||||
* -EDQUOT.
|
||||
*
|
||||
* @rule_flags: SQ_RF_TOTL_COUNT indicates that the rule's limit should
|
||||
* be checked against the number of xattrs contributing to a totl value
|
||||
* instead of the sum of the xattrs.
|
||||
*/
|
||||
struct scoutfs_ioctl_quota_rule {
|
||||
__u64 name_val[3];
|
||||
__u64 limit;
|
||||
__u8 prio;
|
||||
__u8 op;
|
||||
__u8 rule_flags;
|
||||
__u8 name_source[3];
|
||||
__u8 name_flags[3];
|
||||
__u8 _pad[7];
|
||||
};
|
||||
|
||||
struct scoutfs_ioctl_get_quota_rules {
|
||||
__u64 iterator[2];
|
||||
__u64 rules_ptr;
|
||||
__u64 rules_nr;
|
||||
};
|
||||
|
||||
/*
|
||||
* Rules are uniquely identified by their non-padded fields. Addition will fail
|
||||
* with -EEXIST if the specified rule already exists and deletion must find a rule
|
||||
* with all matching fields to delete.
|
||||
*/
|
||||
#define SCOUTFS_IOC_GET_QUOTA_RULES \
|
||||
_IOR(SCOUTFS_IOCTL_MAGIC, 20, struct scoutfs_ioctl_get_quota_rules)
|
||||
#define SCOUTFS_IOC_ADD_QUOTA_RULE \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 21, struct scoutfs_ioctl_quota_rule)
|
||||
#define SCOUTFS_IOC_DEL_QUOTA_RULE \
|
||||
_IOW(SCOUTFS_IOCTL_MAGIC, 22, struct scoutfs_ioctl_quota_rule)
|
||||
|
||||
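A minimal userspace sketch of adding a rule with the ioctls above. Only the struct layout and ioctl numbers come from this header; the SQ_OP_DATA and SQ_NS_LITERAL enum names, the header path, and the chosen values are assumptions for illustration.

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scoutfs_ioctl.h"		/* assumed header name */

/* add a rule limiting data block allocation against a literal totl name */
int add_data_limit(const char *mnt_path, unsigned long long limit)
{
	struct scoutfs_ioctl_quota_rule rule;
	int fd, ret;

	memset(&rule, 0, sizeof(rule));
	rule.op = SQ_OP_DATA;			/* assumed enum name */
	rule.name_source[0] = SQ_NS_LITERAL;	/* assumed enum name */
	rule.name_val[0] = 1;			/* totl name chosen by the admin */
	rule.limit = limit;

	fd = open(mnt_path, O_RDONLY);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SCOUTFS_IOC_ADD_QUOTA_RULE, &rule);
	close(fd);
	return ret;
}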
/*
 * Inodes can be indexed in a global key space at a position determined
 * by a .indx. tagged xattr. The xattr name specifies the two index
 * position values, with major having the more significant comparison
 * order.
 */
struct scoutfs_ioctl_xattr_index_entry {
	__u64 minor;
	__u64 ino;
	__u8 major;
	__u8 _pad[7];
};

struct scoutfs_ioctl_read_xattr_index {
	__u64 flags;
	struct scoutfs_ioctl_xattr_index_entry first;
	struct scoutfs_ioctl_xattr_index_entry last;
	__u64 entries_ptr;
	__u64 entries_nr;
};

#define SCOUTFS_IOC_READ_XATTR_INDEX \
	_IOR(SCOUTFS_IOCTL_MAGIC, 23, struct scoutfs_ioctl_read_xattr_index)

#endif
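A minimal userspace sketch of reading one batch of index entries with SCOUTFS_IOC_READ_XATTR_INDEX. The struct layout and ioctl number come from the header above; the assumption that the ioctl returns the number of entries copied into entries_ptr is mine, as are the buffer size and header path.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "scoutfs_ioctl.h"		/* assumed header name */

#define BATCH 128

/* read up to BATCH entries covering the entire major/minor/ino range */
int read_index_batch(int fd, struct scoutfs_ioctl_xattr_index_entry *ents)
{
	struct scoutfs_ioctl_read_xattr_index rxi;

	memset(&rxi, 0, sizeof(rxi));
	/* first is all zeros, last is the largest possible position */
	rxi.last.major = 0xff;
	rxi.last.minor = ~0ULL;
	rxi.last.ino = ~0ULL;
	rxi.entries_ptr = (uintptr_t)ents;
	rxi.entries_nr = BATCH;

	return ioctl(fd, SCOUTFS_IOC_READ_XATTR_INDEX, &rxi);
}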
544 kmod/src/item.c (diff suppressed because it is too large)
@@ -3,6 +3,8 @@

int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_smaller_zero(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_lookup_exact(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len,
			struct scoutfs_lock *lock);
@@ -15,14 +17,15 @@ int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len,
			struct scoutfs_lock *lock);
			struct scoutfs_lock *lock, struct scoutfs_lock *primary);
int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
			void *val, int val_len, struct scoutfs_lock *lock);
int scoutfs_item_delete(struct super_block *sb, struct scoutfs_key *key,
			struct scoutfs_lock *lock);
int scoutfs_item_delete_force(struct super_block *sb,
			struct scoutfs_key *key,
			struct scoutfs_lock *lock);
int scoutfs_item_delete_force(struct super_block *sb, struct scoutfs_key *key,
			struct scoutfs_lock *lock, struct scoutfs_lock *primary);

u64 scoutfs_item_dirty_pages(struct super_block *sb);
int scoutfs_item_write_dirty(struct super_block *sb);
83 kmod/src/kernelcompat.c (new file)
@@ -0,0 +1,83 @@

#include <linux/uio.h>

#include "kernelcompat.h"

#ifdef KC_SHRINKER_SHRINK
#include <linux/shrinker.h>
/*
 * If a target doesn't have that .{count,scan}_objects() interface then
 * we have a .shrink() helper that performs the shrink work in terms of
 * count/scan.
 */
int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kc_shrinker_wrapper *wrapper = container_of(shrink, struct kc_shrinker_wrapper, shrink);
	unsigned long nr;
	unsigned long rc;

	if (sc->nr_to_scan != 0) {
		rc = wrapper->scan_objects(shrink, sc);
		/* translate magic values to the equivalent for older kernels */
		if (rc == SHRINK_STOP)
			return -1;
		else if (rc == SHRINK_EMPTY)
			return 0;
	}

	nr = wrapper->count_objects(shrink, sc);

	return min_t(unsigned long, nr, INT_MAX);
}
#endif

#ifndef KC_CURRENT_TIME_INODE
struct timespec64 kc_current_time(struct inode *inode)
{
	struct timespec64 now;
	unsigned gran;

	getnstimeofday64(&now);

	if (unlikely(!inode->i_sb)) {
		WARN(1, "current_time() called with uninitialized super_block in the inode");
		return now;
	}

	gran = inode->i_sb->s_time_gran;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1) {
		/* nothing */
	} else if (gran == NSEC_PER_SEC) {
		now.tv_nsec = 0;
	} else if (gran > 1 && gran < NSEC_PER_SEC) {
		now.tv_nsec -= now.tv_nsec % gran;
	} else {
		WARN(1, "illegal file time granularity: %u", gran);
	}

	return now;
}
#endif

#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
ssize_t
kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, loff_t *ppos,
			       size_t count, ssize_t written)
{
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, WRITE, iov, nr_segs, count);
	status = kc_generic_perform_write(iocb, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
	}

	return written ? written : status;
}
#endif
@@ -1,48 +1,412 @@
|
||||
#ifndef _SCOUTFS_KERNELCOMPAT_H_
|
||||
#define _SCOUTFS_KERNELCOMPAT_H_
|
||||
|
||||
#ifndef KC_ITERATE_DIR_CONTEXT
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
typedef filldir_t kc_readdir_ctx_t;
|
||||
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, dirent, ctx)
|
||||
#define KC_FOP_READDIR readdir
|
||||
#define kc_readdir_pos(filp, ctx) (filp)->f_pos
|
||||
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, dirent, ctx)
|
||||
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
|
||||
(ctx(dirent, name, name_len, pos, ino, dt) == 0)
|
||||
|
||||
/*
|
||||
* v4.15-rc3-4-gae5e165d855d
|
||||
*
|
||||
* new API for handling inode->i_version. This forces us to
|
||||
* include this API where we need. We include it here for
|
||||
* convenience instead of where it's needed.
|
||||
*/
|
||||
#ifdef KC_NEED_LINUX_IVERSION_H
|
||||
#include <linux/iversion.h>
|
||||
#else
|
||||
typedef struct dir_context * kc_readdir_ctx_t;
|
||||
#define KC_DECLARE_READDIR(name, file, dirent, ctx) name(file, ctx)
|
||||
#define KC_FOP_READDIR iterate
|
||||
#define kc_readdir_pos(filp, ctx) (ctx)->pos
|
||||
#define kc_dir_emit_dots(file, dirent, ctx) dir_emit_dots(file, ctx)
|
||||
#define kc_dir_emit(ctx, dirent, name, name_len, pos, ino, dt) \
|
||||
dir_emit(ctx, name, name_len, ino, dt)
|
||||
/*
|
||||
* Kernels before above version will need to fall back to
|
||||
* manipulating inode->i_version as previous with degraded
|
||||
* methods.
|
||||
*/
|
||||
#define inode_set_iversion_queried(inode, val) \
|
||||
do { \
|
||||
(inode)->i_version = val; \
|
||||
} while (0)
|
||||
#define inode_peek_iversion(inode) \
|
||||
({ \
|
||||
(inode)->i_version; \
|
||||
})
|
||||
#endif
|
||||
|
||||
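A hedged sketch, not from the patch, of how a directory iterator would be written once against the macros above so the same body builds with the old ->readdir()/filldir() interface and the newer ->iterate()/dir_context interface. The entry name and inode number are placeholders.

static int KC_DECLARE_READDIR(example_readdir, struct file *file, void *dirent,
			      kc_readdir_ctx_t ctx)
{
	loff_t pos = kc_readdir_pos(file, ctx);

	if (!kc_dir_emit_dots(file, dirent, ctx))
		return 0;

	/* emit one made-up entry at position 2, then stop */
	if (pos == 2) {
		if (!kc_dir_emit(ctx, dirent, "example", 7, pos, 42, DT_REG))
			return 0;
		kc_readdir_pos(file, ctx) = 3;
	}

	return 0;
}

static const struct file_operations example_dir_fops = {
	.KC_FOP_READDIR = example_readdir,
};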
#ifdef KC_POSIX_ACL_VALID_USER_NS
|
||||
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(user_ns, acl)
|
||||
#else
|
||||
#define kc_posix_acl_valid(user_ns, acl) posix_acl_valid(acl)
|
||||
#endif
|
||||
|
||||
#ifndef KC_DIR_EMIT_DOTS
|
||||
/*
|
||||
* Kernels before ->iterate don't have dir_emit_dots so we give them
|
||||
* one that works with the ->readdir() filldir() method.
|
||||
* v3.6-rc1-24-gdbf2576e37da
|
||||
*
|
||||
* All workqueues are now non-reentrant, and the bit flag is removed
|
||||
* shortly after its uses were removed.
|
||||
*/
|
||||
static inline int dir_emit_dots(struct file *file, void *dirent,
|
||||
filldir_t filldir)
|
||||
#ifndef WQ_NON_REENTRANT
|
||||
#define WQ_NON_REENTRANT 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v3.18-rc2-19-gb5ae6b15bd73
|
||||
*
|
||||
* Folds d_materialise_unique into d_splice_alias. Note reversal
|
||||
* of arguments (Also note Documentation/filesystems/porting.rst)
|
||||
*/
|
||||
#ifndef KC_D_MATERIALISE_UNIQUE
|
||||
#define d_materialise_unique(dentry, inode) d_splice_alias(inode, dentry)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v4.8-rc1-29-g31051c85b5e2
|
||||
*
|
||||
* fall back to inode_change_ok() if setattr_prepare() isn't available
|
||||
*/
|
||||
#ifndef KC_SETATTR_PREPARE
|
||||
#define setattr_prepare(dentry, attr) inode_change_ok(d_inode(dentry), attr)
|
||||
#endif
|
||||
|
||||
#ifndef KC___POSIX_ACL_CREATE
|
||||
#define __posix_acl_create posix_acl_create
|
||||
#define __posix_acl_chmod posix_acl_chmod
|
||||
#endif
|
||||
|
||||
#ifndef KC_PERCPU_COUNTER_ADD_BATCH
|
||||
#define percpu_counter_add_batch __percpu_counter_add
|
||||
#endif
|
||||
|
||||
#ifndef KC_MEMALLOC_NOFS_SAVE
|
||||
#define memalloc_nofs_save memalloc_noio_save
|
||||
#define memalloc_nofs_restore memalloc_noio_restore
|
||||
#endif
|
||||
|
||||
#ifdef KC_BIO_BI_OPF
|
||||
#define kc_bio_get_opf(bio) \
|
||||
({ \
|
||||
(bio)->bi_opf; \
|
||||
})
|
||||
#define kc_bio_set_opf(bio, opf) \
|
||||
do { \
|
||||
(bio)->bi_opf = opf; \
|
||||
} while (0)
|
||||
#define kc_bio_set_sector(bio, sect) \
|
||||
do { \
|
||||
(bio)->bi_iter.bi_sector = sect;\
|
||||
} while (0)
|
||||
#define kc_submit_bio(bio) submit_bio(bio)
|
||||
#else
|
||||
#define kc_bio_get_opf(bio) \
|
||||
({ \
|
||||
(bio)->bi_rw; \
|
||||
})
|
||||
#define kc_bio_set_opf(bio, opf) \
|
||||
do { \
|
||||
(bio)->bi_rw = opf; \
|
||||
} while (0)
|
||||
#define kc_bio_set_sector(bio, sect) \
|
||||
do { \
|
||||
(bio)->bi_sector = sect; \
|
||||
} while (0)
|
||||
#define kc_submit_bio(bio) \
|
||||
do { \
|
||||
submit_bio((bio)->bi_rw, bio); \
|
||||
} while (0)
|
||||
#define bio_set_dev(bio, bdev) \
|
||||
do { \
|
||||
(bio)->bi_bdev = (bdev); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifdef KC_BIO_BI_STATUS
|
||||
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio)
|
||||
#define kc_bio_get_errno(bio) ({ blk_status_to_errno((bio)->bi_status); })
|
||||
#else
|
||||
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio, int _error_arg)
|
||||
#define kc_bio_get_errno(bio) ({ (int)((void)(bio), _error_arg); })
|
||||
#endif
|
||||
|
||||
/*
|
||||
* v4.13-rc1-6-ge462ec50cb5f
|
||||
*
|
||||
* MS_* (mount) flags from <linux/mount.h> should not be used in the kernel
|
||||
* anymore from 4.x onwards. Instead, we need to use the SB_* (superblock) flags
|
||||
*/
|
||||
#ifndef SB_POSIXACL
|
||||
#define SB_POSIXACL MS_POSIXACL
|
||||
#define SB_I_VERSION MS_I_VERSION
|
||||
#endif
|
||||
|
||||
#ifndef KC_CURRENT_TIME_INODE
|
||||
struct timespec64 kc_current_time(struct inode *inode);
|
||||
#define current_time kc_current_time
|
||||
#define kc_timespec timespec
|
||||
#else
|
||||
#define kc_timespec timespec64
|
||||
#endif
|
||||
|
||||
#ifndef KC_SHRINKER_SHRINK
|
||||
|
||||
#define KC_DEFINE_SHRINKER(name) struct shrinker name
|
||||
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do { \
|
||||
__typeof__(name) _shrink = (name); \
|
||||
_shrink->count_objects = (countfn); \
|
||||
_shrink->scan_objects = (scanfn); \
|
||||
_shrink->seeks = DEFAULT_SEEKS; \
|
||||
} while (0)
|
||||
|
||||
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(ptr, type, shrinker)
|
||||
#ifdef KC_SHRINKER_NAME
|
||||
#define KC_REGISTER_SHRINKER register_shrinker
|
||||
#else
|
||||
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr))
|
||||
#endif /* KC_SHRINKER_NAME */
|
||||
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr))
|
||||
#define KC_SHRINKER_FN(ptr) (ptr)
|
||||
#else
|
||||
|
||||
#include <linux/shrinker.h>
|
||||
#ifndef SHRINK_STOP
|
||||
#define SHRINK_STOP (~0UL)
|
||||
#define SHRINK_EMPTY (~0UL - 1)
|
||||
#endif
|
||||
|
||||
int kc_shrink_wrapper_fn(struct shrinker *shrink, struct shrink_control *sc);
|
||||
struct kc_shrinker_wrapper {
|
||||
unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc);
|
||||
unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc);
|
||||
struct shrinker shrink;
|
||||
};
|
||||
|
||||
#define KC_DEFINE_SHRINKER(name) struct kc_shrinker_wrapper name;
|
||||
#define KC_INIT_SHRINKER_FUNCS(name, countfn, scanfn) do { \
|
||||
struct kc_shrinker_wrapper *_wrap = (name); \
|
||||
_wrap->count_objects = (countfn); \
|
||||
_wrap->scan_objects = (scanfn); \
|
||||
_wrap->shrink.shrink = kc_shrink_wrapper_fn; \
|
||||
_wrap->shrink.seeks = DEFAULT_SEEKS; \
|
||||
} while (0)
|
||||
#define KC_SHRINKER_CONTAINER_OF(ptr, type) container_of(container_of(ptr, struct kc_shrinker_wrapper, shrink), type, shrinker)
|
||||
#define KC_REGISTER_SHRINKER(ptr, fmt, ...) (register_shrinker(ptr.shrink))
|
||||
#define KC_UNREGISTER_SHRINKER(ptr) (unregister_shrinker(ptr.shrink))
|
||||
#define KC_SHRINKER_FN(ptr) (ptr.shrink)
|
||||
|
||||
#endif /* KC_SHRINKER_SHRINK */
|
||||
|
||||
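A hedged sketch, not from the patch, of the intended use of the shrinker compat macros above: a cache struct embeds its shrinker with KC_DEFINE_SHRINKER, the count/scan callbacks recover the container with KC_SHRINKER_CONTAINER_OF, and setup goes through KC_INIT_SHRINKER_FUNCS and KC_REGISTER_SHRINKER. All names here are illustrative.

struct example_cache {
	unsigned long nr_cached;
	KC_DEFINE_SHRINKER(shrinker);
};

static unsigned long example_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct example_cache *cache = KC_SHRINKER_CONTAINER_OF(shrink, struct example_cache);

	return cache->nr_cached;
}

static unsigned long example_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	/* a real scan would free up to sc->nr_to_scan objects and return the count */
	return 0;
}

static void example_cache_setup(struct example_cache *cache)
{
	KC_INIT_SHRINKER_FUNCS(&cache->shrinker, example_count, example_scan);
	KC_REGISTER_SHRINKER(&cache->shrinker, "example-cache");
}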
#ifdef KC_KERNEL_GETSOCKNAME_ADDRLEN
|
||||
#include <linux/net.h>
|
||||
#include <linux/inet.h>
|
||||
static inline int kc_kernel_getsockname(struct socket *sock, struct sockaddr *addr)
|
||||
{
|
||||
if (file->f_pos == 0) {
|
||||
if (filldir(dirent, ".", 1, 1,
|
||||
file->f_path.dentry->d_inode->i_ino, DT_DIR))
|
||||
return 0;
|
||||
file->f_pos = 1;
|
||||
}
|
||||
int addrlen = sizeof(struct sockaddr_in);
|
||||
int ret = kernel_getsockname(sock, addr, &addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
return -EAFNOSUPPORT;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (file->f_pos == 1) {
|
||||
if (filldir(dirent, "..", 2, 1,
|
||||
parent_ino(file->f_path.dentry), DT_DIR))
|
||||
return 0;
|
||||
file->f_pos = 2;
|
||||
}
|
||||
return sizeof(struct sockaddr_in);
|
||||
}
|
||||
static inline int kc_kernel_getpeername(struct socket *sock, struct sockaddr *addr)
|
||||
{
|
||||
int addrlen = sizeof(struct sockaddr_in);
|
||||
int ret = kernel_getpeername(sock, addr, &addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
return -EAFNOSUPPORT;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
return sizeof(struct sockaddr_in);
|
||||
}
|
||||
#else
|
||||
#define kc_kernel_getsockname(sock, addr) kernel_getsockname(sock, addr)
|
||||
#define kc_kernel_getpeername(sock, addr) kernel_getpeername(sock, addr)
|
||||
#endif
|
||||
|
||||
#ifdef KC_SOCK_CREATE_KERN_NET
|
||||
#define kc_sock_create_kern(family, type, proto, res) sock_create_kern(&init_net, family, type, proto, res)
|
||||
#else
|
||||
#define kc_sock_create_kern sock_create_kern
|
||||
#endif
|
||||
|
||||
#ifndef KC_GENERIC_FILE_BUFFERED_WRITE
|
||||
ssize_t kc_generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long nr_segs, loff_t pos, loff_t *ppos,
|
||||
size_t count, ssize_t written);
|
||||
#define generic_file_buffered_write kc_generic_file_buffered_write
|
||||
#ifdef KC_GENERIC_PERFORM_WRITE_KIOCB_IOV_ITER
|
||||
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
|
||||
{
|
||||
iocb->ki_pos = pos;
|
||||
return generic_perform_write(iocb, iter);
|
||||
}
|
||||
#else
|
||||
static inline int kc_generic_perform_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
return generic_perform_write(file, iter, pos);
|
||||
}
|
||||
#endif
|
||||
#endif // KC_GENERIC_FILE_BUFFERED_WRITE
|
||||
|
||||
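A hedged sketch, not from the patch, of the call the block above keeps working: a buffered write path hands an iovec, segment count, and position to generic_file_buffered_write(), and on kernels without that symbol the kc_ wrapper routes it into generic_perform_write(). The function and its arguments are illustrative only.

static ssize_t example_buffered_write(struct kiocb *iocb, const struct iovec *iov,
				      unsigned long nr_segs, size_t count)
{
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;

	/* updates iocb->ki_pos on success, returns bytes written or an error */
	written = generic_file_buffered_write(iocb, iov, nr_segs, pos,
					      &iocb->ki_pos, count, written);
	return written;
}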
#ifndef KC_HAVE_BLK_OPF_T
|
||||
/* typedef __u32 __bitwise blk_opf_t; */
|
||||
typedef unsigned int blk_opf_t;
|
||||
#endif
|
||||
|
||||
#ifdef KC_LIST_CMP_CONST_ARG_LIST_HEAD
|
||||
#define KC_LIST_CMP_CONST const
|
||||
#else
|
||||
#define KC_LIST_CMP_CONST
|
||||
#endif
|
||||
|
||||
#ifdef KC_VMALLOC_PGPROT_T
|
||||
#define kc__vmalloc(size, gfp_mask) __vmalloc(size, gfp_mask, PAGE_KERNEL)
|
||||
#else
|
||||
#define kc__vmalloc __vmalloc
|
||||
#endif
|
||||
|
||||
#ifdef KC_VFS_METHOD_USER_NAMESPACE_ARG
|
||||
#define KC_VFS_NS_DEF struct user_namespace *mnt_user_ns,
|
||||
#define KC_VFS_NS mnt_user_ns,
|
||||
#define KC_VFS_INIT_NS &init_user_ns,
|
||||
#else
|
||||
#define KC_VFS_NS_DEF
|
||||
#define KC_VFS_NS
|
||||
#define KC_VFS_INIT_NS
|
||||
#endif
|
||||
|
||||
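A hedged sketch, not from the patch, of how the user-namespace compat macros keep one inode operation building across kernels that did and did not add the mount user namespace argument. It assumes setattr_prepare() and setattr_copy() grew their namespace argument on the same kernels that KC_VFS_METHOD_USER_NAMESPACE_ARG tracks; the function is illustrative.

static int example_setattr(KC_VFS_NS_DEF struct dentry *dentry, struct iattr *attr)
{
	int ret;

	/* KC_VFS_NS expands to the namespace argument, or to nothing */
	ret = setattr_prepare(KC_VFS_NS dentry, attr);
	if (ret)
		return ret;

	setattr_copy(KC_VFS_NS d_inode(dentry), attr);
	mark_inode_dirty(d_inode(dentry));
	return 0;
}

static const struct inode_operations example_iops = {
	.setattr = example_setattr,
};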
#ifdef KC_BIO_ALLOC_DEV_OPF_ARGS
|
||||
#define kc_bio_alloc bio_alloc
|
||||
#else
|
||||
#include <linux/bio.h>
|
||||
static inline struct bio *kc_bio_alloc(struct block_device *bdev, unsigned short nr_vecs,
|
||||
blk_opf_t opf, gfp_t gfp_mask)
|
||||
{
|
||||
struct bio *b = bio_alloc(gfp_mask, nr_vecs);
|
||||
if (b) {
|
||||
kc_bio_set_opf(b, opf);
|
||||
bio_set_dev(b, bdev);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
#endif
|
||||
|
||||
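A hedged sketch, not from the patch, combining the bio compat helpers: allocate a one-page bio with kc_bio_alloc(), point it at a sector, and submit it with an end_io handler that recovers the errno the same way on old and new kernels. The function names are placeholders; READ is used as the operation because it spells the same request on both bio APIs.

static void KC_DECLARE_BIO_END_IO(example_end_io, struct bio *bio)
{
	int err = kc_bio_get_errno(bio);

	if (err)
		pr_err("example io failed: %d\n", err);
	bio_put(bio);
}

static int example_submit_read(struct block_device *bdev, struct page *page, u64 sector)
{
	struct bio *bio;

	bio = kc_bio_alloc(bdev, 1, READ, GFP_NOFS);
	if (!bio)
		return -ENOMEM;

	kc_bio_set_sector(bio, sector);
	bio->bi_end_io = example_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	kc_submit_bio(bio);
	return 0;
}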
#ifndef KC_FIEMAP_PREP
|
||||
#define fiemap_prep(inode, fieinfo, start, len, flags) fiemap_check_flags(fieinfo, flags)
|
||||
#endif
|
||||
|
||||
#ifndef KC_KERNEL_OLD_TIMEVAL_STRUCT
|
||||
#define __kernel_old_timeval timeval
|
||||
#define ns_to_kernel_old_timeval(ktime) ns_to_timeval(ktime.tv64)
|
||||
#endif
|
||||
|
||||
#ifdef KC_SOCK_SET_SNDTIMEO
|
||||
#include <net/sock.h>
|
||||
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
|
||||
{
|
||||
sock_set_sndtimeo(sock->sk, secs);
|
||||
return 0;
|
||||
}
|
||||
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
|
||||
{
|
||||
struct __kernel_old_timeval tv;
|
||||
sockptr_t kopt;
|
||||
|
||||
tv = ns_to_kernel_old_timeval(to);
|
||||
|
||||
kopt = KERNEL_SOCKPTR(&tv);
|
||||
|
||||
return sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO_NEW,
|
||||
kopt, sizeof(tv));
|
||||
}
|
||||
#else
|
||||
#include <net/sock.h>
|
||||
static inline int kc_sock_set_sndtimeo(struct socket *sock, s64 secs)
|
||||
{
|
||||
struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };
|
||||
return kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_rcvtimeo(struct socket *sock, ktime_t to)
|
||||
{
|
||||
struct __kernel_old_timeval tv;
|
||||
|
||||
tv = ns_to_kernel_old_timeval(to);
|
||||
return kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_SETSOCKOPT_SOCKPTR_T
|
||||
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
|
||||
{
|
||||
sockptr_t kopt = KERNEL_SOCKPTR(optval);
|
||||
return sock_setsockopt(sock, level, op, kopt, sizeof(optval));
|
||||
}
|
||||
#else
|
||||
static inline int kc_sock_setsockopt(struct socket *sock, int level, int op, int *optval, unsigned int optlen)
|
||||
{
|
||||
return kernel_setsockopt(sock, level, op, (char *)optval, sizeof(optval));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef KC_HAVE_TCP_SET_SOCKFN
|
||||
#include <linux/net.h>
|
||||
#include <net/tcp.h>
|
||||
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
|
||||
{
|
||||
return tcp_sock_set_keepintvl(sock->sk, val);
|
||||
}
|
||||
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
|
||||
{
|
||||
return tcp_sock_set_keepidle(sock->sk, val);
|
||||
}
|
||||
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
|
||||
{
|
||||
tcp_sock_set_user_timeout(sock->sk, val);
|
||||
return 0;
|
||||
}
|
||||
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
|
||||
{
|
||||
tcp_sock_set_nodelay(sock->sk);
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#include <linux/net.h>
|
||||
#include <net/tcp.h>
|
||||
static inline int kc_tcp_sock_set_keepintvl(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_keepidle(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_user_timeout(struct socket *sock, int val)
|
||||
{
|
||||
int optval = val;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
static inline int kc_tcp_sock_set_nodelay(struct socket *sock)
|
||||
{
|
||||
int optval = 1;
|
||||
return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
|
||||
}
|
||||
#endif
|
||||
|
||||
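A hedged sketch, not from the patch, of the socket wrappers in use: create a kernel TCP socket and apply timeouts and keepalive settings through the kc_ helpers so the same code builds whether the kernel offers sock_set_*()/tcp_sock_set_*() or only kernel_setsockopt(). The option values are illustrative.

static int example_setup_sock(struct socket **sockp)
{
	struct socket *sock;
	int optval = 1;
	int ret;

	ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ret;

	ret = kc_sock_set_sndtimeo(sock, 10) ?:
	      kc_tcp_sock_set_rcvtimeo(sock, ms_to_ktime(10 * MSEC_PER_SEC)) ?:
	      kc_tcp_sock_set_nodelay(sock) ?:
	      kc_tcp_sock_set_keepidle(sock, 60) ?:
	      kc_tcp_sock_set_keepintvl(sock, 10) ?:
	      kc_sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &optval, sizeof(optval));
	if (ret) {
		sock_release(sock);
		return ret;
	}

	*sockp = sock;
	return 0;
}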
#ifdef KC_INODE_DIO_END
|
||||
#define kc_inode_dio_end inode_dio_end
|
||||
#else
|
||||
#define kc_inode_dio_end inode_dio_done
|
||||
#endif
|
||||
|
||||
#ifndef KC_MM_VM_FAULT_T
|
||||
typedef unsigned int vm_fault_t;
|
||||
static inline vm_fault_t vmf_error(int err)
|
||||
{
|
||||
if (err == -ENOMEM)
|
||||
return VM_FAULT_OOM;
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
#endif
|
||||
|
||||
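A hedged sketch, not from the patch, of the vm_fault_t fallback in use: a page_mkwrite style handler that maps an internal errno to a fault return code with vmf_error(), which the block above supplies on kernels that lack it. The single-argument fault signature and the helper are assumptions for illustration.

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	int ret;

	lock_page(page);
	ret = example_dirty_page(page);	/* placeholder helper */
	if (ret < 0) {
		unlock_page(page);
		return vmf_error(ret);
	}

	/* return with the page still locked */
	return VM_FAULT_LOCKED;
}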
|
||||
@@ -108,6 +108,16 @@ static inline void scoutfs_key_set_ones(struct scoutfs_key *key)
|
||||
memset(key->__pad, 0, sizeof(key->__pad));
|
||||
}
|
||||
|
||||
static inline bool scoutfs_key_is_ones(struct scoutfs_key *key)
|
||||
{
|
||||
return key->sk_zone == U8_MAX &&
|
||||
key->_sk_first == cpu_to_le64(U64_MAX) &&
|
||||
key->sk_type == U8_MAX &&
|
||||
key->_sk_second == cpu_to_le64(U64_MAX) &&
|
||||
key->_sk_third == cpu_to_le64(U64_MAX) &&
|
||||
key->_sk_fourth == U8_MAX;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return a -1/0/1 comparison of keys.
|
||||
*
|
||||
@@ -115,8 +125,8 @@ static inline void scoutfs_key_set_ones(struct scoutfs_key *key)
|
||||
* other alternatives across keys that first differ in any of the
|
||||
* values. Say maybe 20% faster than memcmp.
|
||||
*/
|
||||
static inline int scoutfs_key_compare(struct scoutfs_key *a,
|
||||
struct scoutfs_key *b)
|
||||
static inline int scoutfs_key_compare(const struct scoutfs_key *a,
|
||||
const struct scoutfs_key *b)
|
||||
{
|
||||
return scoutfs_cmp(a->sk_zone, b->sk_zone) ?:
|
||||
scoutfs_cmp(le64_to_cpu(a->_sk_first), le64_to_cpu(b->_sk_first)) ?:
|
||||
@@ -132,10 +142,10 @@ static inline int scoutfs_key_compare(struct scoutfs_key *a,
|
||||
* 1: a_start > b_end
|
||||
* else 0: ranges overlap
|
||||
*/
|
||||
static inline int scoutfs_key_compare_ranges(struct scoutfs_key *a_start,
|
||||
struct scoutfs_key *a_end,
|
||||
struct scoutfs_key *b_start,
|
||||
struct scoutfs_key *b_end)
|
||||
static inline int scoutfs_key_compare_ranges(const struct scoutfs_key *a_start,
|
||||
const struct scoutfs_key *a_end,
|
||||
const struct scoutfs_key *b_start,
|
||||
const struct scoutfs_key *b_end)
|
||||
{
|
||||
return scoutfs_key_compare(a_end, b_start) < 0 ? -1 :
|
||||
scoutfs_key_compare(a_start, b_end) > 0 ? 1 :
|
||||
|
||||
618 kmod/src/lock.c
@@ -12,12 +12,12 @@
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/preempt_mask.h> /* a rhel shed.h needed preempt_offset? */
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/posix_acl.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "lock.h"
|
||||
@@ -34,6 +34,10 @@
|
||||
#include "data.h"
|
||||
#include "xattr.h"
|
||||
#include "item.h"
|
||||
#include "omap.h"
|
||||
#include "util.h"
|
||||
#include "totl.h"
|
||||
#include "quota.h"
|
||||
|
||||
/*
|
||||
* scoutfs uses a lock service to manage item cache consistency between
|
||||
@@ -65,8 +69,6 @@
|
||||
* relative to that lock state we resend.
|
||||
*/
|
||||
|
||||
#define GRACE_PERIOD_KT ms_to_ktime(10)
|
||||
|
||||
/*
|
||||
* allocated per-super, freed on unmount.
|
||||
*/
|
||||
@@ -74,19 +76,19 @@ struct lock_info {
|
||||
struct super_block *sb;
|
||||
spinlock_t lock;
|
||||
bool shutdown;
|
||||
bool unmounting;
|
||||
struct rb_root lock_tree;
|
||||
struct rb_root lock_range_tree;
|
||||
struct shrinker shrinker;
|
||||
KC_DEFINE_SHRINKER(shrinker);
|
||||
struct list_head lru_list;
|
||||
unsigned long long lru_nr;
|
||||
struct workqueue_struct *workq;
|
||||
struct work_struct grant_work;
|
||||
struct list_head grant_list;
|
||||
struct delayed_work inv_dwork;
|
||||
struct work_struct inv_work;
|
||||
struct list_head inv_list;
|
||||
struct work_struct shrink_work;
|
||||
struct list_head shrink_list;
|
||||
atomic64_t next_refresh_gen;
|
||||
|
||||
struct dentry *tseq_dentry;
|
||||
struct scoutfs_tseq_tree tseq_tree;
|
||||
};
|
||||
@@ -122,21 +124,37 @@ static bool lock_modes_match(int granted, int requested)
|
||||
}
|
||||
|
||||
/*
|
||||
* invalidate cached data associated with an inode whose lock is going
|
||||
* Invalidate cached data associated with an inode whose lock is going
|
||||
* away.
|
||||
*
|
||||
* We try to drop cached dentries and inodes covered by the lock if they
|
||||
* aren't referenced. This removes them from the mount's open map and
|
||||
* allows deletions to be performed by unlink without having to wait for
|
||||
* remote cached inodes to be dropped.
|
||||
*
|
||||
* We kick the d_prune and iput off to async work because they can end
|
||||
* up in final iput and inode eviction item deletion which would
|
||||
* deadlock. d_prune->dput can end up in iput on parents in different
|
||||
* locks entirely.
|
||||
*/
|
||||
static void invalidate_inode(struct super_block *sb, u64 ino)
|
||||
{
|
||||
struct scoutfs_inode_info *si;
|
||||
struct inode *inode;
|
||||
|
||||
inode = scoutfs_ilookup(sb, ino);
|
||||
inode = scoutfs_ilookup_nowait_nonewfree(sb, ino);
|
||||
if (inode) {
|
||||
si = SCOUTFS_I(inode);
|
||||
|
||||
scoutfs_inc_counter(sb, lock_invalidate_inode);
|
||||
if (S_ISREG(inode->i_mode)) {
|
||||
truncate_inode_pages(inode->i_mapping, 0);
|
||||
scoutfs_data_wait_changed(inode);
|
||||
}
|
||||
iput(inode);
|
||||
|
||||
forget_all_cached_acls(inode);
|
||||
|
||||
scoutfs_inode_queue_iput(inode, SI_IPUT_FLAG_PRUNE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -169,6 +187,9 @@ static int lock_invalidate(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (lock->start.sk_zone == SCOUTFS_QUOTA_ZONE && !lock_mode_can_read(mode))
|
||||
scoutfs_quota_invalidate(sb);
|
||||
|
||||
/* have to invalidate if we're not in the only usable case */
|
||||
if (!(prev == SCOUTFS_LOCK_WRITE && mode == SCOUTFS_LOCK_READ)) {
|
||||
retry:
|
||||
@@ -187,6 +208,7 @@ retry:
|
||||
}
|
||||
spin_unlock(&lock->cov_list_lock);
|
||||
|
||||
/* invalidate inodes after removing coverage so drop/evict aren't covered */
|
||||
if (lock->start.sk_zone == SCOUTFS_FS_ZONE) {
|
||||
ino = le64_to_cpu(lock->start.ski_ino);
|
||||
last = le64_to_cpu(lock->end.ski_ino);
|
||||
@@ -224,11 +246,11 @@ static void lock_free(struct lock_info *linfo, struct scoutfs_lock *lock)
|
||||
BUG_ON(!RB_EMPTY_NODE(&lock->node));
|
||||
BUG_ON(!RB_EMPTY_NODE(&lock->range_node));
|
||||
BUG_ON(!list_empty(&lock->lru_head));
|
||||
BUG_ON(!list_empty(&lock->grant_head));
|
||||
BUG_ON(!list_empty(&lock->inv_head));
|
||||
BUG_ON(!list_empty(&lock->shrink_head));
|
||||
BUG_ON(!list_empty(&lock->cov_list));
|
||||
|
||||
kfree(lock->inode_deletion_data);
|
||||
kfree(lock);
|
||||
}
|
||||
|
||||
@@ -251,8 +273,8 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
|
||||
RB_CLEAR_NODE(&lock->node);
|
||||
RB_CLEAR_NODE(&lock->range_node);
|
||||
INIT_LIST_HEAD(&lock->lru_head);
|
||||
INIT_LIST_HEAD(&lock->grant_head);
|
||||
INIT_LIST_HEAD(&lock->inv_head);
|
||||
INIT_LIST_HEAD(&lock->inv_list);
|
||||
INIT_LIST_HEAD(&lock->shrink_head);
|
||||
spin_lock_init(&lock->cov_list_lock);
|
||||
INIT_LIST_HEAD(&lock->cov_list);
|
||||
@@ -262,6 +284,7 @@ static struct scoutfs_lock *lock_alloc(struct super_block *sb,
|
||||
lock->sb = sb;
|
||||
init_waitqueue_head(&lock->waitq);
|
||||
lock->mode = SCOUTFS_LOCK_NULL;
|
||||
lock->invalidating_mode = SCOUTFS_LOCK_NULL;
|
||||
|
||||
atomic64_set(&lock->forest_bloom_nr, 0);
|
||||
|
||||
@@ -279,6 +302,7 @@ static void lock_inc_count(unsigned int *counts, enum scoutfs_lock_mode mode)
|
||||
static void lock_dec_count(unsigned int *counts, enum scoutfs_lock_mode mode)
|
||||
{
|
||||
BUG_ON(mode < 0 || mode >= SCOUTFS_LOCK_NR_MODES);
|
||||
BUG_ON(counts[mode] == 0);
|
||||
counts[mode]--;
|
||||
}
|
||||
|
||||
@@ -298,23 +322,6 @@ static bool lock_counts_match(int granted, unsigned int *counts)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if there are any mode counts that match with the desired
|
||||
* mode. There can be other non-matching counts as well but we're only
|
||||
* testing for the existence of any matching counts.
|
||||
*/
|
||||
static bool lock_count_match_exists(int desired, unsigned int *counts)
|
||||
{
|
||||
enum scoutfs_lock_mode mode;
|
||||
|
||||
for (mode = 0; mode < SCOUTFS_LOCK_NR_MODES; mode++) {
|
||||
if (counts[mode] && lock_modes_match(desired, mode))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* An idle lock has nothing going on. It can be present in the lru and
|
||||
* can be freed by the final put when it has a null mode.
|
||||
@@ -532,45 +539,15 @@ static void put_lock(struct lock_info *linfo,struct scoutfs_lock *lock)
|
||||
}
|
||||
|
||||
/*
|
||||
* Locks have a grace period that extends after activity and prevents
|
||||
* invalidation. It's intended to let nodes do reasonable batches of
|
||||
* work as locks ping pong between nodes that are doing conflicting
|
||||
* work.
|
||||
*/
|
||||
static void extend_grace(struct super_block *sb, struct scoutfs_lock *lock)
|
||||
{
|
||||
ktime_t now = ktime_get();
|
||||
|
||||
if (ktime_after(now, lock->grace_deadline))
|
||||
scoutfs_inc_counter(sb, lock_grace_set);
|
||||
else
|
||||
scoutfs_inc_counter(sb, lock_grace_extended);
|
||||
|
||||
lock->grace_deadline = ktime_add(now, GRACE_PERIOD_KT);
|
||||
}
|
||||
|
||||
static void queue_grant_work(struct lock_info *linfo)
|
||||
{
|
||||
assert_spin_locked(&linfo->lock);
|
||||
|
||||
if (!list_empty(&linfo->grant_list) && !linfo->shutdown)
|
||||
queue_work(linfo->workq, &linfo->grant_work);
|
||||
}
|
||||
|
||||
/*
|
||||
* We immediately queue work on the assumption that the caller might
|
||||
* have made a change (set a lock mode) which can let one of the
|
||||
* invalidating locks make forward progress, even if other locks are
|
||||
* waiting for their grace period to elapse. It's a trade-off between
|
||||
* invalidation latency and burning cpu repeatedly finding that locks
|
||||
* are still in their grace period.
|
||||
* The caller has made a change (set a lock mode) which can let one of the
|
||||
* invalidating locks make forward progress.
|
||||
*/
|
||||
static void queue_inv_work(struct lock_info *linfo)
|
||||
{
|
||||
assert_spin_locked(&linfo->lock);
|
||||
|
||||
if (!list_empty(&linfo->inv_list) && !linfo->shutdown)
|
||||
mod_delayed_work(linfo->workq, &linfo->inv_dwork, 0);
|
||||
if (!list_empty(&linfo->inv_list))
|
||||
queue_work(linfo->workq, &linfo->inv_work);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -618,80 +595,17 @@ static void bug_on_inconsistent_grant_cache(struct super_block *sb,
|
||||
}
|
||||
|
||||
/*
|
||||
* Each lock has received a grant response message from the server.
|
||||
* The client is receiving a grant response message from the server.
|
||||
* This is being called synchronously in the networking receive path so
|
||||
* our work should be quick and reasonably non-blocking.
|
||||
*
|
||||
* Grant responses can be reordered with incoming invalidation requests
|
||||
* from the server so we have to be careful to only set the new mode
|
||||
* once the old mode matches.
|
||||
*
|
||||
* We extend the grace period as we grant the lock if there is a waiting
|
||||
* locker who can use the lock. This stops invalidation from pulling
|
||||
* the granted lock out from under the requester, resulting in a lot of
|
||||
* churn with no forward progress. Using the grace period avoids having
|
||||
* to identify a specific waiter and give it an acquired lock. It's
|
||||
* also very similar to waking up the locker and having it win the race
|
||||
* against the invalidation. In that case they'd extend the grace
|
||||
* period anyway as they unlock.
|
||||
*/
|
||||
static void lock_grant_worker(struct work_struct *work)
|
||||
{
|
||||
struct lock_info *linfo = container_of(work, struct lock_info,
|
||||
grant_work);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_net_lock_grant_response *gr;
|
||||
struct scoutfs_net_lock *nl;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_grant_work);
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
list_for_each_entry_safe(lock, tmp, &linfo->grant_list, grant_head) {
|
||||
gr = &lock->grant_resp;
|
||||
nl = &lock->grant_resp.nl;
|
||||
|
||||
/* wait for reordered invalidation to finish */
|
||||
if (lock->mode != nl->old_mode)
|
||||
continue;
|
||||
|
||||
bug_on_inconsistent_grant_cache(sb, lock, nl->old_mode,
|
||||
nl->new_mode);
|
||||
|
||||
if (!lock_mode_can_read(nl->old_mode) &&
|
||||
lock_mode_can_read(nl->new_mode)) {
|
||||
lock->refresh_gen =
|
||||
atomic64_inc_return(&linfo->next_refresh_gen);
|
||||
}
|
||||
|
||||
lock->request_pending = 0;
|
||||
lock->mode = nl->new_mode;
|
||||
lock->write_version = le64_to_cpu(nl->write_version);
|
||||
lock->roots = gr->roots;
|
||||
|
||||
if (lock_count_match_exists(nl->new_mode, lock->waiters))
|
||||
extend_grace(sb, lock);
|
||||
|
||||
trace_scoutfs_lock_granted(sb, lock);
|
||||
list_del_init(&lock->grant_head);
|
||||
wake_up(&lock->waitq);
|
||||
put_lock(linfo, lock);
|
||||
}
|
||||
|
||||
/* invalidations might be waiting for our reordered grant */
|
||||
queue_inv_work(linfo);
|
||||
spin_unlock(&linfo->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* The client is receiving a grant response message from the server. We
|
||||
* find the lock, record the response, and add it to the list for grant
|
||||
* work to process.
|
||||
* The server's state machine can immediately send an invalidate request
|
||||
* after sending this grant response. We won't process the incoming
|
||||
* invalidate request until after processing this grant response.
|
||||
*/
|
||||
int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
struct scoutfs_net_lock_grant_response *gr)
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
struct scoutfs_net_lock *nl = &gr->nl;
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
|
||||
@@ -705,62 +619,63 @@ int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
trace_scoutfs_lock_grant_response(sb, lock);
|
||||
BUG_ON(!lock->request_pending);
|
||||
|
||||
lock->grant_resp = *gr;
|
||||
list_add_tail(&lock->grant_head, &linfo->grant_list);
|
||||
queue_grant_work(linfo);
|
||||
bug_on_inconsistent_grant_cache(sb, lock, nl->old_mode, nl->new_mode);
|
||||
|
||||
if (!lock_mode_can_read(nl->old_mode) && lock_mode_can_read(nl->new_mode))
|
||||
lock->refresh_gen = atomic64_inc_return(&linfo->next_refresh_gen);
|
||||
|
||||
lock->request_pending = 0;
|
||||
lock->mode = nl->new_mode;
|
||||
lock->write_seq = le64_to_cpu(nl->write_seq);
|
||||
|
||||
trace_scoutfs_lock_granted(sb, lock);
|
||||
wake_up(&lock->waitq);
|
||||
put_lock(linfo, lock);
|
||||
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct inv_req {
|
||||
struct list_head head;
|
||||
struct scoutfs_lock *lock;
|
||||
u64 net_id;
|
||||
struct scoutfs_net_lock nl;
|
||||
};
|
||||
|
||||
/*
|
||||
* Each lock has received a lock invalidation request from the server
|
||||
* which specifies a new mode for the lock. The server will only send
|
||||
* one invalidation request at a time for each lock.
|
||||
* which specifies a new mode for the lock. Our processing state
|
||||
* machine and server failover and lock recovery can both conspire to
|
||||
* give us triplicate invalidation requests. The incoming requests for
|
||||
* a given lock need to be processed in order, but we can process locks
|
||||
* in any order.
|
||||
*
|
||||
* This is an unsolicited request from the server so it can arrive at
|
||||
* any time after we make the server aware of the lock by initially
|
||||
* requesting it. We wait for users of the current mode to unlock
|
||||
* before invalidating.
|
||||
* any time after we make the server aware of the lock. We wait for
|
||||
* users of the current mode to unlock before invalidating.
|
||||
*
|
||||
* This can arrive on behalf of our request for a mode that conflicts
|
||||
* with our current mode. We have to proceed while we have a request
|
||||
* pending. We can also be racing with shrink requests being sent while
|
||||
* we're invalidating.
|
||||
*
|
||||
* This can be processed concurrently and experience reordering with a
|
||||
* grant response sent back-to-back from the server. We carefully only
|
||||
* invalidate once the lock mode matches what the server told us to
|
||||
* invalidate.
|
||||
*
|
||||
* We delay invalidation processing until a grace period has elapsed
|
||||
* since the last unlock. The intent is to let users do a reasonable
|
||||
* batch of work before dropping the lock. Continuous unlocking can
|
||||
* continuously extend the deadline.
|
||||
*
|
||||
* Before we start invalidating the lock we set the lock to the new
|
||||
* mode, preventing further incompatible users of the old mode from
|
||||
* using the lock while we're invalidating.
|
||||
*
|
||||
* This does a lot of serialized inode invalidation in one context and
|
||||
* performs a lot of repeated calls to sync. It would be nice to get
|
||||
* some concurrent inode invalidation and to more carefully only call
|
||||
* sync when needed.
|
||||
* using the lock while we're invalidating. We record the previously
|
||||
* granted mode so that we can send lock recover responses with the old
|
||||
* granted mode during invalidation.
|
||||
*/
|
||||
static void lock_invalidate_worker(struct work_struct *work)
|
||||
{
|
||||
struct lock_info *linfo = container_of(work, struct lock_info,
|
||||
inv_dwork.work);
|
||||
struct lock_info *linfo = container_of(work, struct lock_info, inv_work);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_net_lock *nl;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
unsigned long delay = MAX_JIFFY_OFFSET;
|
||||
ktime_t now = ktime_get();
|
||||
ktime_t deadline;
|
||||
struct inv_req *ireq;
|
||||
LIST_HEAD(ready);
|
||||
u64 net_id;
|
||||
int ret;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_invalidate_work);
|
||||
@@ -768,26 +683,15 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
list_for_each_entry_safe(lock, tmp, &linfo->inv_list, inv_head) {
|
||||
nl = &lock->inv_nl;
|
||||
|
||||
/* wait for reordered grant to finish */
|
||||
if (lock->mode != nl->old_mode)
|
||||
continue;
|
||||
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);
|
||||
nl = &ireq->nl;
|
||||
|
||||
/* wait until incompatible holders unlock */
|
||||
if (!lock_counts_match(nl->new_mode, lock->users))
|
||||
continue;
|
||||
|
||||
/* skip if grace hasn't elapsed, record earliest */
|
||||
deadline = lock->grace_deadline;
|
||||
if (!linfo->shutdown && ktime_before(now, deadline)) {
|
||||
delay = min(delay,
|
||||
nsecs_to_jiffies(ktime_to_ns(
|
||||
ktime_sub(deadline, now))));
|
||||
scoutfs_inc_counter(linfo->sb, lock_grace_wait);
|
||||
continue;
|
||||
}
|
||||
/* set the new mode, no incompatible users during inval */
|
||||
/* set the new mode, no incompatible users during inval, recov needs old */
|
||||
lock->invalidating_mode = lock->mode;
|
||||
lock->mode = nl->new_mode;
|
||||
|
||||
/* move everyone that's ready to our private list */
|
||||
@@ -797,18 +701,23 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
if (list_empty(&ready))
|
||||
goto out;
|
||||
return;
|
||||
|
||||
/* invalidate once the lock is ready */
|
||||
list_for_each_entry(lock, &ready, inv_head) {
|
||||
nl = &lock->inv_nl;
|
||||
net_id = lock->inv_net_id;
|
||||
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);
|
||||
nl = &ireq->nl;
|
||||
|
||||
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
|
||||
BUG_ON(ret);
|
||||
/* only lock protocol, inv can't call subsystems after shutdown */
|
||||
if (!linfo->shutdown) {
|
||||
ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
|
||||
BUG_ON(ret);
|
||||
}
|
||||
|
||||
/* respond with the key and modes from the request */
|
||||
ret = scoutfs_client_lock_response(sb, net_id, nl);
|
||||
/* respond with the key and modes from the request, server might have died */
|
||||
ret = scoutfs_client_lock_response(sb, ireq->net_id, nl);
|
||||
if (ret == -ENOTCONN)
|
||||
ret = 0;
|
||||
BUG_ON(ret);
|
||||
|
||||
scoutfs_inc_counter(sb, lock_invalidate_response);
|
||||
@@ -818,53 +727,91 @@ static void lock_invalidate_worker(struct work_struct *work)
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
list_for_each_entry_safe(lock, tmp, &ready, inv_head) {
|
||||
list_del_init(&lock->inv_head);
|
||||
ireq = list_first_entry(&lock->inv_list, struct inv_req, head);
|
||||
|
||||
lock->invalidate_pending = 0;
|
||||
trace_scoutfs_lock_invalidated(sb, lock);
|
||||
wake_up(&lock->waitq);
|
||||
|
||||
list_del(&ireq->head);
|
||||
kfree(ireq);
|
||||
|
||||
lock->invalidating_mode = SCOUTFS_LOCK_NULL;
|
||||
|
||||
if (list_empty(&lock->inv_list)) {
|
||||
/* finish if another request didn't arrive */
|
||||
list_del_init(&lock->inv_head);
|
||||
lock->invalidate_pending = 0;
|
||||
wake_up(&lock->waitq);
|
||||
} else {
|
||||
/* another request arrived, back on the list and requeue */
|
||||
list_move_tail(&lock->inv_head, &linfo->inv_list);
|
||||
queue_inv_work(linfo);
|
||||
}
|
||||
|
||||
put_lock(linfo, lock);
|
||||
}
|
||||
|
||||
/* grant might have been waiting for invalidate request */
|
||||
queue_grant_work(linfo);
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
out:
|
||||
/* queue delayed work if invalidations waiting on grace deadline */
|
||||
if (delay != MAX_JIFFY_OFFSET)
|
||||
queue_delayed_work(linfo->workq, &linfo->inv_dwork, delay);
|
||||
}
|
||||
|
||||
/*
|
||||
* Record an incoming invalidate request from the server and add its lock
|
||||
* to the list for processing.
|
||||
* Add an incoming invalidation request to the end of the list on the
|
||||
* lock and queue it for blocking invalidation work. This is being
|
||||
* called synchronously in the net recv path to avoid reordering with
|
||||
* grants that were sent immediately before the server sent this
|
||||
* invalidation.
|
||||
*
|
||||
* This is trusting the server and will crash if it's sent bad requests :/
|
||||
* Incoming invalidation requests are a function of the remote lock
|
||||
* server's state machine and are slightly decoupled from our lock
|
||||
* state. We can receive duplicate requests if the server is quick
|
||||
* enough to send the next request after we send a previous reply, or if
|
||||
* pending invalidation spans server failover and lock recovery.
|
||||
*
|
||||
* Similarly, we can get a request to invalidate a lock we don't have if
|
||||
* invalidation finished just after lock recovery to a new server.
|
||||
* Happily we can just reply because we satisfy the invalidation
|
||||
* response promise to not be using the old lock's mode if the lock
|
||||
* doesn't exist.
|
||||
*/
|
||||
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *lock = NULL;
|
||||
struct inv_req *ireq;
|
||||
int ret = 0;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_invalidate_request);
|
||||
|
||||
ireq = kmalloc(sizeof(struct inv_req), GFP_NOFS);
|
||||
BUG_ON(!ireq); /* lock server doesn't handle response errors */
|
||||
if (ireq == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
lock = get_lock(sb, &nl->key);
|
||||
BUG_ON(!lock);
|
||||
if (lock) {
|
||||
BUG_ON(lock->invalidate_pending);
|
||||
lock->invalidate_pending = 1;
|
||||
lock->inv_nl = *nl;
|
||||
lock->inv_net_id = net_id;
|
||||
list_add_tail(&lock->inv_head, &linfo->inv_list);
|
||||
trace_scoutfs_lock_invalidate_request(sb, lock);
|
||||
queue_inv_work(linfo);
|
||||
ireq->lock = lock;
|
||||
ireq->net_id = net_id;
|
||||
ireq->nl = *nl;
|
||||
if (list_empty(&lock->inv_list)) {
|
||||
list_add_tail(&lock->inv_head, &linfo->inv_list);
|
||||
lock->invalidate_pending = 1;
|
||||
queue_inv_work(linfo);
|
||||
}
|
||||
list_add_tail(&ireq->head, &lock->inv_list);
|
||||
}
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
return 0;
|
||||
out:
|
||||
if (!lock) {
|
||||
ret = scoutfs_client_lock_response(sb, net_id, nl);
|
||||
BUG_ON(ret); /* lock server doesn't fence timed out client requests */
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -879,6 +826,7 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_net_lock_recover *nlr;
|
||||
enum scoutfs_lock_mode mode;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *next;
|
||||
struct rb_node *node;
|
||||
@@ -899,10 +847,15 @@ int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
|
||||
for (i = 0; lock && i < SCOUTFS_NET_LOCK_MAX_RECOVER_NR; i++) {
|
||||
|
||||
if (lock->invalidating_mode != SCOUTFS_LOCK_NULL)
|
||||
mode = lock->invalidating_mode;
|
||||
else
|
||||
mode = lock->mode;
|
||||
|
||||
nlr->locks[i].key = lock->start;
|
||||
nlr->locks[i].write_version = cpu_to_le64(lock->write_version);
|
||||
nlr->locks[i].old_mode = lock->mode;
|
||||
nlr->locks[i].new_mode = lock->mode;
|
||||
nlr->locks[i].write_seq = cpu_to_le64(lock->write_seq);
|
||||
nlr->locks[i].old_mode = mode;
|
||||
nlr->locks[i].new_mode = mode;
|
||||
|
||||
node = rb_next(&lock->node);
|
||||
if (node)
|
||||
@@ -995,7 +948,7 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
|
||||
lock_inc_count(lock->waiters, mode);
|
||||
|
||||
for (;;) {
|
||||
if (linfo->shutdown) {
|
||||
if (WARN_ON_ONCE(linfo->shutdown)) {
|
||||
ret = -ESHUTDOWN;
|
||||
break;
|
||||
}
|
||||
@@ -1040,8 +993,14 @@ static int lock_key_range(struct super_block *sb, enum scoutfs_lock_mode mode, i
|
||||
|
||||
trace_scoutfs_lock_wait(sb, lock);
|
||||
|
||||
ret = wait_event_interruptible(lock->waitq,
|
||||
lock_wait_cond(sb, lock, mode));
|
||||
if (flags & SCOUTFS_LKF_INTERRUPTIBLE) {
|
||||
ret = wait_event_interruptible(lock->waitq,
|
||||
lock_wait_cond(sb, lock, mode));
|
||||
} else {
|
||||
wait_event(lock->waitq, lock_wait_cond(sb, lock, mode));
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
if (ret)
|
||||
break;
|
||||
@@ -1098,7 +1057,7 @@ int scoutfs_lock_inode(struct super_block *sb, enum scoutfs_lock_mode mode, int
|
||||
goto out;
|
||||
|
||||
if (flags & SCOUTFS_LKF_REFRESH_INODE) {
|
||||
ret = scoutfs_inode_refresh(inode, *lock, flags);
|
||||
ret = scoutfs_inode_refresh(inode, *lock);
|
||||
if (ret < 0) {
|
||||
scoutfs_unlock(sb, *lock, mode);
|
||||
*lock = NULL;
|
||||
@@ -1259,37 +1218,65 @@ int scoutfs_lock_inode_index(struct super_block *sb, enum scoutfs_lock_mode mode
|
||||
}
|
||||
|
||||
/*
|
||||
* The rid lock protects a mount's private persistent items in the rid
|
||||
* zone. It's held for the duration of the mount. It lets the mount
|
||||
* modify the rid items at will and signals to other mounts that we're
|
||||
* still alive and our rid items shouldn't be reclaimed.
|
||||
* Orphan items are stored in their own zone which are modified with
|
||||
* shared write_only locks and are read inconsistently without locks by
|
||||
* background scanning work.
|
||||
*
|
||||
* Being held for the entire mount prevents other nodes from reclaiming
|
||||
* our items, like free blocks, when it would make sense for them to be
|
||||
* able to. Maybe we have a bunch free and they're trying to allocate
|
||||
* and are getting ENOSPC.
|
||||
* Since we only use write_only locks we just lock the entire zone, but
|
||||
* the api provides the inode in case we ever change the locking scheme.
|
||||
*/
|
||||
int scoutfs_lock_rid(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
u64 rid, struct scoutfs_lock **lock)
|
||||
int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int flags, u64 ino,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_key_set_zeros(&start);
|
||||
start.sk_zone = SCOUTFS_RID_ZONE;
|
||||
start.sko_rid = cpu_to_le64(rid);
|
||||
start.sk_zone = SCOUTFS_ORPHAN_ZONE;
|
||||
start.sko_ino = 0;
|
||||
start.sk_type = SCOUTFS_ORPHAN_TYPE;
|
||||
|
||||
scoutfs_key_set_ones(&end);
|
||||
end.sk_zone = SCOUTFS_RID_ZONE;
|
||||
end.sko_rid = cpu_to_le64(rid);
|
||||
scoutfs_key_set_zeros(&end);
|
||||
end.sk_zone = SCOUTFS_ORPHAN_ZONE;
|
||||
end.sko_ino = cpu_to_le64(U64_MAX);
|
||||
end.sk_type = SCOUTFS_ORPHAN_TYPE;
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_totl_set_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_xattr_indx_get_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
struct scoutfs_key end;
|
||||
|
||||
scoutfs_quota_get_lock_range(&start, &end);
|
||||
|
||||
return lock_key_range(sb, mode, flags, &start, &end, lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* As we unlock we always extend the grace period to give the caller
|
||||
* another pass at the lock before it's invalidated.
|
||||
*/
|
||||
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock, enum scoutfs_lock_mode mode)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
@@ -1302,7 +1289,6 @@ void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock, enum scou
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
lock_dec_count(lock->users, mode);
|
||||
extend_grace(sb, lock);
|
||||
if (lock_mode_can_write(mode))
|
||||
lock->dirty_trans_seq = scoutfs_trans_sample_seq(sb);
|
||||
|
||||
@@ -1385,7 +1371,7 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
|
||||
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
enum scoutfs_lock_mode mode)
|
||||
{
|
||||
signed char lock_mode = ACCESS_ONCE(lock->mode);
|
||||
signed char lock_mode = READ_ONCE(lock->mode);
|
||||
|
||||
return lock_modes_match(lock_mode, mode) &&
|
||||
scoutfs_key_compare_ranges(key, key,
|
||||
@@ -1440,6 +1426,17 @@ static void lock_shrink_worker(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long lock_count_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
|
||||
struct super_block *sb = linfo->sb;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_count_objects);
|
||||
|
||||
return shrinker_min_long(linfo->lru_nr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the shrinking process for locks on the lru. If a lock is on
|
||||
* the lru then it can't have any active users. We don't want to block
|
||||
@@ -1452,21 +1449,18 @@ static void lock_shrink_worker(struct work_struct *work)
|
||||
* mode which will prevent the lock from being freed when the null
|
||||
* response arrives.
|
||||
*/
|
||||
static int scoutfs_lock_shrink(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
static unsigned long lock_scan_objects(struct shrinker *shrink,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct lock_info *linfo = container_of(shrink, struct lock_info,
|
||||
shrinker);
|
||||
struct lock_info *linfo = KC_SHRINKER_CONTAINER_OF(shrink, struct lock_info);
|
||||
struct super_block *sb = linfo->sb;
|
||||
struct scoutfs_lock *lock;
|
||||
struct scoutfs_lock *tmp;
|
||||
unsigned long nr;
|
||||
unsigned long freed = 0;
|
||||
unsigned long nr = sc->nr_to_scan;
|
||||
bool added = false;
|
||||
int ret;
|
||||
|
||||
nr = sc->nr_to_scan;
|
||||
if (nr == 0)
|
||||
goto out;
|
||||
scoutfs_inc_counter(sb, lock_scan_objects);
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
@@ -1477,13 +1471,14 @@ restart:
|
||||
BUG_ON(lock->mode == SCOUTFS_LOCK_NULL);
|
||||
BUG_ON(!list_empty(&lock->shrink_head));
|
||||
|
||||
if (linfo->shutdown || nr-- == 0)
|
||||
if (nr-- == 0)
|
||||
break;
|
||||
|
||||
__lock_del_lru(linfo, lock);
|
||||
lock->request_pending = 1;
|
||||
list_add_tail(&lock->shrink_head, &linfo->shrink_list);
|
||||
added = true;
|
||||
freed++;
|
||||
|
||||
scoutfs_inc_counter(sb, lock_shrink_attempted);
|
||||
trace_scoutfs_lock_shrink(sb, lock);
|
||||
@@ -1498,13 +1493,11 @@ restart:
|
||||
if (added)
|
||||
queue_work(linfo->workq, &linfo->shrink_work);
|
||||
|
||||
out:
|
||||
ret = min_t(unsigned long, linfo->lru_nr, INT_MAX);
|
||||
trace_scoutfs_lock_shrink_exit(sb, sc->nr_to_scan, ret);
|
||||
return ret;
|
||||
trace_scoutfs_lock_shrink_exit(sb, sc->nr_to_scan, freed);
|
||||
return freed;
|
||||
}
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr)
|
||||
void scoutfs_free_unused_locks(struct super_block *sb)
|
||||
{
|
||||
struct lock_info *linfo = SCOUTFS_SB(sb)->lock_info;
|
||||
struct shrink_control sc = {
|
||||
@@ -1512,7 +1505,7 @@ void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr)
|
||||
.nr_to_scan = INT_MAX,
|
||||
};
|
||||
|
||||
linfo->shrinker.shrink(&linfo->shrinker, &sc);
|
||||
lock_scan_objects(KC_SHRINKER_FN(&linfo->shrinker), &sc);
|
||||
}
|
||||
|
||||
static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
@@ -1532,15 +1525,80 @@ static void lock_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller is going to be calling _destroy soon and, critically, is
|
||||
* about to shutdown networking before calling us so that we don't get
|
||||
* any callbacks while we're destroying. We have to ensure that we
|
||||
* won't call networking after this returns.
|
||||
* shrink_dcache_for_umount() tears down dentries with no locking. We
|
||||
* need to make sure that our invalidation won't touch dentries before
|
||||
* we return and the caller calls the generic vfs unmount path.
|
||||
*/
|
||||
void scoutfs_lock_unmount_begin(struct super_block *sb)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
|
||||
if (linfo) {
|
||||
linfo->unmounting = true;
|
||||
flush_work(&linfo->inv_work);
|
||||
}
|
||||
}
|
||||
|
||||
void scoutfs_lock_flush_invalidate(struct super_block *sb)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
|
||||
if (linfo)
|
||||
flush_work(&linfo->inv_work);
|
||||
}
|
||||
|
||||
static u64 get_held_lock_refresh_gen(struct super_block *sb, struct scoutfs_key *start)
|
||||
{
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
u64 refresh_gen = 0;
|
||||
|
||||
/* this can be called from all manner of places */
|
||||
if (!linfo)
|
||||
return 0;
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
lock = lock_lookup(sb, start, NULL);
|
||||
if (lock) {
|
||||
if (lock_mode_can_read(lock->mode))
|
||||
refresh_gen = lock->refresh_gen;
|
||||
}
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
return refresh_gen;
|
||||
}
|
||||
|
||||
u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino)
|
||||
{
|
||||
struct scoutfs_key start;
|
||||
|
||||
scoutfs_key_set_zeros(&start);
|
||||
start.sk_zone = SCOUTFS_FS_ZONE;
|
||||
start.ski_ino = cpu_to_le64(ino & ~(u64)SCOUTFS_LOCK_INODE_GROUP_MASK);
|
||||
|
||||
return get_held_lock_refresh_gen(sb, &start);
|
||||
}
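A brief sketch of how a caller might use this helper; the ino and saved_refresh_gen names are placeholders for illustration, not part of this change:

	/* hypothetical caller: detect that the covering lock changed */
	u64 gen = scoutfs_lock_ino_refresh_gen(sb, ino);

	if (gen == 0 || gen != saved_refresh_gen) {
		/* no read-capable lock is held, or it was re-granted since
		 * we last refreshed; treat the cached inode as stale */
	}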
/*
|
||||
* The caller is going to be shutting down transactions and the client.
|
||||
* We need to make sure that locking won't call either after we return.
|
||||
*
|
||||
* Internal fs threads can be using locking, and locking can have async
|
||||
* work pending. We use ->shutdown to force callers to return
|
||||
* -ESHUTDOWN and to prevent the future queueing of work that could call
|
||||
* networking. Locks whose work is stopped will be torn down by _destroy.
|
||||
* At this point all fs callers and internal services that use locks
|
||||
* should have stopped. We won't have any callers initiating lock
|
||||
* transitions and sending requests. We set the shutdown flag to catch
|
||||
* anyone who breaks this rule.
|
||||
*
|
||||
* We unregister the shrinker so that we won't try and send null
|
||||
* requests in response to memory pressure. The locks will all be
|
||||
* unceremoniously dropped once we get a farewell response from the
|
||||
* server which indicates that they destroyed our locking state.
|
||||
*
|
||||
* We will still respond to invalidation requests that have to be
|
||||
* processed to let unmount in other mounts acquire locks and make
|
||||
* progress. However, we don't fully process the invalidation because
|
||||
* we're shutting down. We only update the lock state and send the
|
||||
* response. We shouldn't have any users of locking that require
|
||||
* invalidation correctness at this point.
|
||||
*/
|
||||
void scoutfs_lock_shutdown(struct super_block *sb)
|
||||
{
|
||||
@@ -1553,19 +1611,18 @@ void scoutfs_lock_shutdown(struct super_block *sb)
|
||||
|
||||
trace_scoutfs_lock_shutdown(sb, linfo);
|
||||
|
||||
spin_lock(&linfo->lock);
|
||||
/* stop the shrinker from queueing work */
|
||||
KC_UNREGISTER_SHRINKER(&linfo->shrinker);
|
||||
flush_work(&linfo->shrink_work);
|
||||
|
||||
/* cause current and future lock calls to return errors */
|
||||
spin_lock(&linfo->lock);
|
||||
linfo->shutdown = true;
|
||||
for (node = rb_first(&linfo->lock_tree); node; node = rb_next(node)) {
|
||||
lock = rb_entry(node, struct scoutfs_lock, node);
|
||||
wake_up(&lock->waitq);
|
||||
}
|
||||
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
flush_work(&linfo->grant_work);
|
||||
flush_delayed_work(&linfo->inv_dwork);
|
||||
flush_work(&linfo->shrink_work);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1585,6 +1642,8 @@ void scoutfs_lock_destroy(struct super_block *sb)
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_LOCK_INFO(sb, linfo);
|
||||
struct scoutfs_lock *lock;
|
||||
struct inv_req *ireq_tmp;
|
||||
struct inv_req *ireq;
|
||||
struct rb_node *node;
|
||||
enum scoutfs_lock_mode mode;
|
||||
|
||||
@@ -1593,8 +1652,6 @@ void scoutfs_lock_destroy(struct super_block *sb)
|
||||
|
||||
trace_scoutfs_lock_destroy(sb, linfo);
|
||||
|
||||
/* stop the shrinker from queueing work */
|
||||
unregister_shrinker(&linfo->shrinker);
|
||||
|
||||
/* make sure that no one's actively using locks */
|
||||
spin_lock(&linfo->lock);
|
||||
@@ -1613,8 +1670,6 @@ void scoutfs_lock_destroy(struct super_block *sb)
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
if (linfo->workq) {
|
||||
/* pending grace work queues normal work */
|
||||
flush_workqueue(linfo->workq);
|
||||
/* now all work won't queue itself */
|
||||
destroy_workqueue(linfo->workq);
|
||||
}
|
||||
@@ -1631,22 +1686,31 @@ void scoutfs_lock_destroy(struct super_block *sb)
|
||||
* of free).
|
||||
*/
|
||||
spin_lock(&linfo->lock);
|
||||
|
||||
node = rb_first(&linfo->lock_tree);
|
||||
while (node) {
|
||||
lock = rb_entry(node, struct scoutfs_lock, node);
|
||||
node = rb_next(node);
|
||||
|
||||
list_for_each_entry_safe(ireq, ireq_tmp, &lock->inv_list, head) {
|
||||
list_del_init(&ireq->head);
|
||||
put_lock(linfo, ireq->lock);
|
||||
kfree(ireq);
|
||||
}
|
||||
|
||||
lock->request_pending = 0;
|
||||
if (!list_empty(&lock->lru_head))
|
||||
__lock_del_lru(linfo, lock);
|
||||
if (!list_empty(&lock->grant_head))
|
||||
list_del_init(&lock->grant_head);
|
||||
if (!list_empty(&lock->inv_head))
|
||||
if (!list_empty(&lock->inv_head)) {
|
||||
list_del_init(&lock->inv_head);
|
||||
lock->invalidate_pending = 0;
|
||||
}
|
||||
if (!list_empty(&lock->shrink_head))
|
||||
list_del_init(&lock->shrink_head);
|
||||
lock_remove(linfo, lock);
|
||||
lock_free(linfo, lock);
|
||||
}
|
||||
|
||||
spin_unlock(&linfo->lock);
|
||||
|
||||
kfree(linfo);
|
||||
@@ -1667,13 +1731,11 @@ int scoutfs_lock_setup(struct super_block *sb)
|
||||
spin_lock_init(&linfo->lock);
|
||||
linfo->lock_tree = RB_ROOT;
|
||||
linfo->lock_range_tree = RB_ROOT;
|
||||
linfo->shrinker.shrink = scoutfs_lock_shrink;
|
||||
linfo->shrinker.seeks = DEFAULT_SEEKS;
|
||||
register_shrinker(&linfo->shrinker);
|
||||
KC_INIT_SHRINKER_FUNCS(&linfo->shrinker, lock_count_objects,
|
||||
lock_scan_objects);
|
||||
KC_REGISTER_SHRINKER(&linfo->shrinker, "scoutfs-lock:" SCSBF, SCSB_ARGS(sb));
|
||||
INIT_LIST_HEAD(&linfo->lru_list);
|
||||
INIT_WORK(&linfo->grant_work, lock_grant_worker);
|
||||
INIT_LIST_HEAD(&linfo->grant_list);
|
||||
INIT_DELAYED_WORK(&linfo->inv_dwork, lock_invalidate_worker);
|
||||
INIT_WORK(&linfo->inv_work, lock_invalidate_worker);
|
||||
INIT_LIST_HEAD(&linfo->inv_list);
|
||||
INIT_WORK(&linfo->shrink_work, lock_shrink_worker);
|
||||
INIT_LIST_HEAD(&linfo->shrink_list);
|
||||
|
||||
@@ -6,12 +6,15 @@
|
||||
|
||||
#define SCOUTFS_LKF_REFRESH_INODE 0x01 /* update stale inode from item */
|
||||
#define SCOUTFS_LKF_NONBLOCK 0x02 /* only use already held locks */
|
||||
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_NONBLOCK << 1) - 1))
|
||||
#define SCOUTFS_LKF_INTERRUPTIBLE 0x04 /* pending signals return -ERESTARTSYS */
|
||||
#define SCOUTFS_LKF_INVALID (~((SCOUTFS_LKF_INTERRUPTIBLE << 1) - 1))
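Because SCOUTFS_LKF_INVALID is the complement of every defined flag bit, a caller-side sanity check reduces to a single mask test. A minimal sketch, not code from this diff:

	/* hypothetical: reject unknown lock flags up front */
	if (WARN_ON_ONCE(flags & SCOUTFS_LKF_INVALID))
		return -EINVAL;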
#define SCOUTFS_LOCK_NR_MODES SCOUTFS_LOCK_INVALID
|
||||
|
||||
struct inode_deletion_lock_data;
|
||||
|
||||
/*
|
||||
* A few fields (start, end, refresh_gen, write_version, granted_mode)
|
||||
* A few fields (start, end, refresh_gen, write_seq, granted_mode)
|
||||
* are referenced by code outside lock.c.
|
||||
*/
|
||||
struct scoutfs_lock {
|
||||
@@ -21,26 +24,22 @@ struct scoutfs_lock {
|
||||
struct rb_node node;
|
||||
struct rb_node range_node;
|
||||
u64 refresh_gen;
|
||||
u64 write_version;
|
||||
u64 write_seq;
|
||||
u64 dirty_trans_seq;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct list_head lru_head;
|
||||
wait_queue_head_t waitq;
|
||||
ktime_t grace_deadline;
|
||||
unsigned long request_pending:1,
|
||||
invalidate_pending:1;
|
||||
|
||||
struct list_head grant_head;
|
||||
struct scoutfs_net_lock_grant_response grant_resp;
|
||||
struct list_head inv_head;
|
||||
struct scoutfs_net_lock inv_nl;
|
||||
u64 inv_net_id;
|
||||
struct list_head inv_head; /* entry in linfo's list of locks with invalidations */
|
||||
struct list_head inv_list; /* list of lock's invalidation requests */
|
||||
struct list_head shrink_head;
|
||||
|
||||
spinlock_t cov_list_lock;
|
||||
struct list_head cov_list;
|
||||
|
||||
enum scoutfs_lock_mode mode;
|
||||
enum scoutfs_lock_mode invalidating_mode;
|
||||
unsigned int waiters[SCOUTFS_LOCK_NR_MODES];
|
||||
unsigned int users[SCOUTFS_LOCK_NR_MODES];
|
||||
|
||||
@@ -48,6 +47,9 @@ struct scoutfs_lock {
|
||||
|
||||
/* the forest tracks which log tree last saw bloom bit updates */
|
||||
atomic64_t forest_bloom_nr;
|
||||
|
||||
/* inode deletion tracks some state per lock */
|
||||
struct inode_deletion_lock_data *inode_deletion_data;
|
||||
};
|
||||
|
||||
struct scoutfs_lock_coverage {
|
||||
@@ -57,7 +59,7 @@ struct scoutfs_lock_coverage {
|
||||
};
|
||||
|
||||
int scoutfs_lock_grant_response(struct super_block *sb,
|
||||
struct scoutfs_net_lock_grant_response *gr);
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_recover_request(struct super_block *sb, u64 net_id,
|
||||
@@ -80,8 +82,14 @@ int scoutfs_lock_inodes(struct super_block *sb, enum scoutfs_lock_mode mode, int
|
||||
struct inode *d, struct scoutfs_lock **D_lock);
|
||||
int scoutfs_lock_rename(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_rid(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
u64 rid, struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_orphan(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
u64 ino, struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_totl(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_xattr_indx(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
int scoutfs_lock_quota(struct super_block *sb, enum scoutfs_lock_mode mode, int flags,
|
||||
struct scoutfs_lock **lock);
|
||||
void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
@@ -96,9 +104,13 @@ void scoutfs_lock_del_coverage(struct super_block *sb,
|
||||
bool scoutfs_lock_protected(struct scoutfs_lock *lock, struct scoutfs_key *key,
|
||||
enum scoutfs_lock_mode mode);
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb, unsigned long nr);
|
||||
u64 scoutfs_lock_ino_refresh_gen(struct super_block *sb, u64 ino);
|
||||
|
||||
void scoutfs_free_unused_locks(struct super_block *sb);
|
||||
|
||||
int scoutfs_lock_setup(struct super_block *sb);
|
||||
void scoutfs_lock_unmount_begin(struct super_block *sb);
|
||||
void scoutfs_lock_flush_invalidate(struct super_block *sb);
|
||||
void scoutfs_lock_shutdown(struct super_block *sb);
|
||||
void scoutfs_lock_destroy(struct super_block *sb);
|
||||
|
||||
|
||||
@@ -20,10 +20,10 @@
|
||||
#include "tseq.h"
|
||||
#include "spbm.h"
|
||||
#include "block.h"
|
||||
#include "btree.h"
|
||||
#include "msg.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "lock_server.h"
|
||||
#include "recov.h"
|
||||
|
||||
/*
|
||||
* The scoutfs server implements a simple lock service. Client mounts
|
||||
@@ -56,14 +56,11 @@
|
||||
* Message requests and responses are reliably delivered in order across
|
||||
* reconnection.
|
||||
*
|
||||
* The server maintains a persistent record of connected clients. A new
|
||||
* server instance discovers these and waits for previously connected
|
||||
* clients to reconnect and recover their state before proceeding. If
|
||||
* clients don't reconnect they are forcefully prevented from unsafely
|
||||
* accessing the shared persistent storage. (fenced, according to the
|
||||
* rules of the platform.. could range from being powered off to having
|
||||
* their switch port disabled to having their local block device set
|
||||
* read-only.)
|
||||
* As a new server comes up it recovers lock state from existing clients
|
||||
* which were connected to a previous lock server. Recover requests are
|
||||
* sent to clients as they connect and they respond with all their
|
||||
* locks. Once all clients and locks are accounted for normal
|
||||
* processing can resume.
|
||||
*
|
||||
* The lock server doesn't respond to memory pressure. The only way
|
||||
* locks are freed is if they are invalidated to null on behalf of a
|
||||
@@ -77,19 +74,12 @@ struct lock_server_info {
|
||||
struct super_block *sb;
|
||||
|
||||
spinlock_t lock;
|
||||
struct mutex mutex;
|
||||
struct rb_root locks_root;
|
||||
|
||||
struct scoutfs_spbm recovery_pending;
|
||||
struct delayed_work recovery_dwork;
|
||||
|
||||
struct scoutfs_tseq_tree tseq_tree;
|
||||
struct dentry *tseq_dentry;
|
||||
|
||||
struct scoutfs_alloc *alloc;
|
||||
struct scoutfs_block_writer *wri;
|
||||
|
||||
atomic64_t write_version;
|
||||
struct scoutfs_tseq_tree stats_tseq_tree;
|
||||
struct dentry *stats_tseq_dentry;
|
||||
};
|
||||
|
||||
#define DECLARE_LOCK_SERVER_INFO(sb, name) \
|
||||
@@ -116,6 +106,9 @@ struct server_lock_node {
|
||||
struct list_head granted;
|
||||
struct list_head requested;
|
||||
struct list_head invalidated;
|
||||
|
||||
struct scoutfs_tseq_entry stats_tseq_entry;
|
||||
u64 stats[SLT_NR];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -160,30 +153,30 @@ enum {
|
||||
*/
|
||||
static void add_client_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
struct client_lock_entry *clent)
|
||||
struct client_lock_entry *c_ent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (list_empty(&clent->head))
|
||||
list_add_tail(&clent->head, list);
|
||||
if (list_empty(&c_ent->head))
|
||||
list_add_tail(&c_ent->head, list);
|
||||
else
|
||||
list_move_tail(&clent->head, list);
|
||||
list_move_tail(&c_ent->head, list);
|
||||
|
||||
clent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
c_ent->on_list = list == &snode->granted ? OL_GRANTED :
|
||||
list == &snode->requested ? OL_REQUESTED :
|
||||
OL_INVALIDATED;
|
||||
}
|
||||
|
||||
static void free_client_entry(struct lock_server_info *inf,
|
||||
struct server_lock_node *snode,
|
||||
struct client_lock_entry *clent)
|
||||
struct client_lock_entry *c_ent)
|
||||
{
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
if (!list_empty(&clent->head))
|
||||
list_del_init(&clent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &clent->tseq_entry);
|
||||
kfree(clent);
|
||||
if (!list_empty(&c_ent->head))
|
||||
list_del_init(&c_ent->head);
|
||||
scoutfs_tseq_del(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
kfree(c_ent);
|
||||
}
|
||||
|
||||
static bool invalid_mode(u8 mode)
|
||||
@@ -209,21 +202,48 @@ static u8 invalidation_mode(u8 granted, u8 requested)
/*
* Return true if the client lock instances described by the entries can
* be granted at the same time. Typically this only means they're both
* modes that are compatible between nodes. In addition there's the
* special case where a read lock on a client is compatible with a write
* lock on the same client because the client's cache covered by the
* read lock is still valid if they get a write lock.
* be granted at the same time. There are only three cases where this is
* true.
*
* First, the two locks are both of the same mode that allows full
* sharing -- read and write_only. The only point of these modes is
* that everyone can share them.
*
* Second, a write lock gives the client permission to read as well.
* This means that a client can upgrade its read lock to a write lock
* without having to invalidate the existing read and drop caches.
*
* Third, null locks are always compatible between clients. It's as
* though the client with the null lock has no lock at all. But it's
* never compatible with any locks on the client requesting null.
* Sending invalidations for existing locks on a client when we get a
* null request is how we resolve races in shrinking locks -- we turn it
* into the unsolicited remote invalidation case.
*
* All other mode and client combinations cannot be shared, most
* typically a write lock invalidating all other non-write holders to
* drop caches and force a read after the write has completed.
*/
static bool client_entries_compatible(struct client_lock_entry *granted,
|
||||
struct client_lock_entry *requested)
|
||||
{
|
||||
return (granted->mode == requested->mode &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ ||
|
||||
granted->mode == SCOUTFS_LOCK_WRITE_ONLY)) ||
|
||||
(granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE);
|
||||
/* only read and write_only can be fully shared */
|
||||
if ((granted->mode == requested->mode) &&
|
||||
(granted->mode == SCOUTFS_LOCK_READ || granted->mode == SCOUTFS_LOCK_WRITE_ONLY))
|
||||
return true;
|
||||
|
||||
/* _write includes reading, so a client can upgrade its read to write */
|
||||
if (granted->rid == requested->rid &&
|
||||
granted->mode == SCOUTFS_LOCK_READ &&
|
||||
requested->mode == SCOUTFS_LOCK_WRITE)
|
||||
return true;
|
||||
|
||||
/* null is always compatible across clients, never within a client */
|
||||
if ((granted->rid != requested->rid) &&
|
||||
(granted->mode == SCOUTFS_LOCK_NULL || requested->mode == SCOUTFS_LOCK_NULL))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
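A quick illustration of the cases above; the compatible() shorthand and the client rids a and b are made up for the example, only the modes and outcomes follow the function:

	/*
	 * compatible(READ@a,       READ@b)       -> true   shared mode
	 * compatible(WRITE_ONLY@a, WRITE_ONLY@b) -> true   shared mode
	 * compatible(READ@a,       WRITE@a)      -> true   same-client upgrade
	 * compatible(READ@a,       WRITE@b)      -> false
	 * compatible(NULL@a,       WRITE@b)      -> true   null across clients
	 * compatible(NULL@a,       READ@a)       -> false  null within a client
	 */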
/*
|
||||
@@ -305,6 +325,8 @@ static struct server_lock_node *alloc_server_lock(struct lock_server_info *inf,
|
||||
snode = get_server_lock(inf, key, ins, false);
|
||||
if (snode != ins)
|
||||
kfree(ins);
|
||||
else
|
||||
scoutfs_tseq_add(&inf->stats_tseq_tree, &snode->stats_tseq_entry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -322,33 +344,37 @@ static void put_server_lock(struct lock_server_info *inf,
|
||||
|
||||
BUG_ON(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
|
||||
if (atomic_dec_and_test(&snode->refcount) &&
|
||||
list_empty(&snode->granted) &&
|
||||
list_empty(&snode->requested) &&
|
||||
list_empty(&snode->invalidated)) {
|
||||
spin_lock(&inf->lock);
|
||||
rb_erase(&snode->node, &inf->locks_root);
|
||||
spin_unlock(&inf->lock);
|
||||
should_free = true;
|
||||
}
|
||||
|
||||
spin_unlock(&inf->lock);
|
||||
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
if (should_free)
|
||||
if (should_free) {
|
||||
scoutfs_tseq_del(&inf->stats_tseq_tree, &snode->stats_tseq_entry);
|
||||
kfree(snode);
|
||||
}
|
||||
}
|
||||
|
||||
static struct client_lock_entry *find_entry(struct server_lock_node *snode,
|
||||
struct list_head *list,
|
||||
u64 rid)
|
||||
{
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
|
||||
WARN_ON_ONCE(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
list_for_each_entry(clent, list, head) {
|
||||
if (clent->rid == rid)
|
||||
return clent;
|
||||
list_for_each_entry(c_ent, list, head) {
|
||||
if (c_ent->rid == rid)
|
||||
return c_ent;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@@ -367,7 +393,7 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
u64 net_id, struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -379,27 +405,29 @@ int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = net_id;
|
||||
clent->mode = nl->new_mode;
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = net_id;
|
||||
c_ent->mode = nl->new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nl->key);
|
||||
if (snode == NULL) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
snode->stats[SLT_REQUEST]++;
|
||||
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->requested, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
out:
|
||||
@@ -418,7 +446,7 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
int ret;
|
||||
|
||||
@@ -430,25 +458,27 @@ int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX should always have a server lock here? recovery? */
|
||||
/* XXX should always have a server lock here? */
|
||||
snode = get_server_lock(inf, &nl->key, NULL, false);
|
||||
if (!snode) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!clent) {
|
||||
snode->stats[SLT_RESPONSE]++;
|
||||
|
||||
c_ent = find_entry(snode, &snode->invalidated, rid);
|
||||
if (!c_ent) {
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nl->new_mode == SCOUTFS_LOCK_NULL) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
} else {
|
||||
clent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
c_ent->mode = nl->new_mode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
}
|
||||
|
||||
ret = process_waiting_requests(sb, snode);
|
||||
@@ -473,31 +503,27 @@ out:
|
||||
* so we unlock the snode mutex.
|
||||
*
|
||||
* All progress must wait for all clients to finish with recovery
|
||||
* because we don't know which locks they'll hold. The unlocked
|
||||
* recovery_pending test here is OK. It's filled by setup before
|
||||
* anything runs. It's emptied by recovery completion. We can get a
|
||||
* false nonempty result if we race with recovery completion, but that's
|
||||
* OK because recovery completion processes all the locks that have
|
||||
* requests after emptying, including the unlikely loser of that race.
|
||||
* because we don't know which locks they'll hold. Once recover
|
||||
* finishes the server calls us to kick all the locks that were waiting
|
||||
* during recovery.
|
||||
*/
|
||||
static int process_waiting_requests(struct super_block *sb,
|
||||
struct server_lock_node *snode)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_net_lock_grant_response gres;
|
||||
struct scoutfs_net_lock nl;
|
||||
struct client_lock_entry *req;
|
||||
struct client_lock_entry *req_tmp;
|
||||
struct client_lock_entry *gr;
|
||||
struct client_lock_entry *gr_tmp;
|
||||
u64 wv;
|
||||
u64 seq;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!mutex_is_locked(&snode->mutex));
|
||||
|
||||
/* processing waits for all invalidation responses or recovery */
|
||||
if (!list_empty(&snode->invalidated) ||
|
||||
!scoutfs_spbm_empty(&inf->recovery_pending)) {
|
||||
scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_LOCKS) != 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
@@ -521,6 +547,7 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
trace_scoutfs_lock_message(sb, SLT_SERVER,
|
||||
SLT_INVALIDATE, SLT_REQUEST,
|
||||
gr->rid, 0, &nl);
|
||||
snode->stats[SLT_INVALIDATE]++;
|
||||
|
||||
add_client_entry(snode, &snode->invalidated, gr);
|
||||
}
|
||||
@@ -531,6 +558,7 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
|
||||
nl.key = snode->key;
|
||||
nl.new_mode = req->mode;
|
||||
nl.write_seq = 0;
|
||||
|
||||
/* see if there's an existing compatible grant to replace */
|
||||
gr = find_entry(snode, &snode->granted, req->rid);
|
||||
@@ -543,21 +571,20 @@ static int process_waiting_requests(struct super_block *sb,
|
||||
|
||||
if (nl.new_mode == SCOUTFS_LOCK_WRITE ||
|
||||
nl.new_mode == SCOUTFS_LOCK_WRITE_ONLY) {
|
||||
wv = atomic64_inc_return(&inf->write_version);
|
||||
nl.write_version = cpu_to_le64(wv);
|
||||
/* doesn't commit seq update, recovered with locks */
|
||||
seq = scoutfs_server_next_seq(sb);
|
||||
nl.write_seq = cpu_to_le64(seq);
|
||||
}
|
||||
|
||||
gres.nl = nl;
|
||||
scoutfs_server_get_roots(sb, &gres.roots);
|
||||
|
||||
ret = scoutfs_server_lock_response(sb, req->rid,
|
||||
req->net_id, &gres);
|
||||
req->net_id, &nl);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
trace_scoutfs_lock_message(sb, SLT_SERVER, SLT_GRANT,
|
||||
SLT_RESPONSE, req->rid,
|
||||
req->net_id, &nl);
|
||||
snode->stats[SLT_GRANT]++;
|
||||
|
||||
/* don't track null client locks, track all else */
|
||||
if (req->mode == SCOUTFS_LOCK_NULL)
|
||||
@@ -573,89 +600,39 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void init_lock_clients_key(struct scoutfs_key *key, u64 rid)
|
||||
{
|
||||
*key = (struct scoutfs_key) {
|
||||
.sk_zone = SCOUTFS_LOCK_CLIENTS_ZONE,
|
||||
.sklc_rid = cpu_to_le64(rid),
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The server received a greeting from a client for the first time. If
|
||||
* the client had already talked to the server then we must find an
|
||||
* existing record for it and should begin recovery. If it doesn't have
|
||||
* a record then it's timed out and we can't allow it to reconnect. If
|
||||
* we're creating a new record for a client we can see EEXIST if the
|
||||
* greeting is resent to a new server after the record was committed but
|
||||
* before the response was received by the client.
|
||||
* the client is in lock recovery then we send the initial lock request.
|
||||
*
|
||||
* This is running in concurrent client greeting processing contexts.
|
||||
*/
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
|
||||
bool should_exist)
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
init_lock_clients_key(&key, rid);
|
||||
|
||||
mutex_lock(&inf->mutex);
|
||||
if (should_exist) {
|
||||
ret = scoutfs_btree_lookup(sb, &super->lock_clients, &key,
|
||||
&iref);
|
||||
if (ret == 0)
|
||||
scoutfs_btree_put_iref(&iref);
|
||||
} else {
|
||||
ret = scoutfs_btree_insert(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients,
|
||||
&key, NULL, 0);
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&inf->mutex);
|
||||
|
||||
if (should_exist && ret == 0) {
|
||||
if (scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
|
||||
scoutfs_key_set_zeros(&key);
|
||||
ret = scoutfs_server_lock_recover_request(sb, rid, &key);
|
||||
if (ret)
|
||||
goto out;
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* A client sent their last recovery response and can exit recovery. If
|
||||
* they were the last client in recovery then we can process all the
|
||||
* server locks that had requests.
|
||||
* All clients have finished lock recovery, so we can make forward progress
|
||||
* on all the queued requests that were waiting on recovery.
|
||||
*/
|
||||
static int finished_recovery(struct super_block *sb, u64 rid, bool cancel)
|
||||
int scoutfs_lock_server_finished_recovery(struct super_block *sb)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
bool still_pending;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
scoutfs_spbm_clear(&inf->recovery_pending, rid);
|
||||
still_pending = !scoutfs_spbm_empty(&inf->recovery_pending);
|
||||
spin_unlock(&inf->lock);
|
||||
if (still_pending)
|
||||
return 0;
|
||||
|
||||
if (cancel)
|
||||
cancel_delayed_work_sync(&inf->recovery_dwork);
|
||||
|
||||
scoutfs_key_set_zeros(&key);
|
||||
|
||||
scoutfs_info(sb, "all lock clients recovered");
|
||||
|
||||
while ((snode = get_server_lock(inf, &key, NULL, true))) {
|
||||
|
||||
key = snode->key;
|
||||
@@ -673,14 +650,6 @@ static int finished_recovery(struct super_block *sb, u64 rid, bool cancel)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void set_max_write_version(struct lock_server_info *inf, u64 new)
|
||||
{
|
||||
u64 old;
|
||||
|
||||
while (new > (old = atomic64_read(&inf->write_version)) &&
|
||||
(atomic64_cmpxchg(&inf->write_version, old, new) != old));
|
||||
}
|
||||
|
||||
/*
|
||||
* We sent a lock recover request to the client when we received its
|
||||
* greeting while in recovery. Here we instantiate all the locks it
|
||||
@@ -692,62 +661,61 @@ int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct client_lock_entry *existing;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
/* client must be in recovery */
|
||||
spin_lock(&inf->lock);
|
||||
if (!scoutfs_spbm_test(&inf->recovery_pending, rid))
|
||||
if (!scoutfs_recov_is_pending(sb, rid, SCOUTFS_RECOV_LOCKS)) {
|
||||
ret = -EINVAL;
|
||||
spin_unlock(&inf->lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* client has sent us all their locks */
|
||||
if (nlr->nr == 0) {
|
||||
ret = finished_recovery(sb, rid, true);
|
||||
scoutfs_server_recov_finish(sb, rid, SCOUTFS_RECOV_LOCKS);
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < le16_to_cpu(nlr->nr); i++) {
|
||||
clent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!clent) {
|
||||
c_ent = kzalloc(sizeof(struct client_lock_entry), GFP_NOFS);
|
||||
if (!c_ent) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&clent->head);
|
||||
clent->rid = rid;
|
||||
clent->net_id = 0;
|
||||
clent->mode = nlr->locks[i].new_mode;
|
||||
INIT_LIST_HEAD(&c_ent->head);
|
||||
c_ent->rid = rid;
|
||||
c_ent->net_id = 0;
|
||||
c_ent->mode = nlr->locks[i].new_mode;
|
||||
|
||||
snode = alloc_server_lock(inf, &nlr->locks[i].key);
|
||||
if (snode == NULL) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
existing = find_entry(snode, &snode->granted, rid);
|
||||
if (existing) {
|
||||
kfree(clent);
|
||||
kfree(c_ent);
|
||||
put_server_lock(inf, snode);
|
||||
ret = -EEXIST;
|
||||
goto out;
|
||||
}
|
||||
|
||||
clent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, clent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &clent->tseq_entry);
|
||||
c_ent->snode = snode;
|
||||
add_client_entry(snode, &snode->granted, c_ent);
|
||||
scoutfs_tseq_add(&inf->tseq_tree, &c_ent->tseq_entry);
|
||||
|
||||
put_server_lock(inf, snode);
|
||||
|
||||
/* make sure next write lock is greater than all recovered */
|
||||
set_max_write_version(inf,
|
||||
le64_to_cpu(nlr->locks[i].write_version));
|
||||
/* make sure next core seq is greater than all lock write seq */
|
||||
scoutfs_server_set_seq_if_greater(sb,
|
||||
le64_to_cpu(nlr->locks[i].write_seq));
|
||||
}
|
||||
|
||||
/* send request for next batch of keys */
|
||||
@@ -759,102 +727,16 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_rid_and_put_ref(struct scoutfs_btree_item_ref *iref, u64 *rid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (iref->val_len == 0) {
|
||||
*rid = le64_to_cpu(iref->key->sklc_rid);
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = -EIO;
|
||||
}
|
||||
scoutfs_btree_put_iref(iref);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work executes if enough time passes without all of the clients
|
||||
* finishing with recovery and canceling the work. We walk through the
|
||||
* client records and find any that still have their recovery pending.
|
||||
*/
|
||||
static void scoutfs_lock_server_recovery_timeout(struct work_struct *work)
|
||||
{
|
||||
struct lock_server_info *inf = container_of(work,
|
||||
struct lock_server_info,
|
||||
recovery_dwork.work);
|
||||
struct super_block *sb = inf->sb;
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
bool timed_out;
|
||||
u64 rid;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_server_hold_commit(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* we enter recovery if there are any client records */
|
||||
for (rid = 0; ; rid++) {
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
if (ret == 0)
|
||||
ret = get_rid_and_put_ref(&iref, &rid);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
spin_lock(&inf->lock);
|
||||
if (scoutfs_spbm_test(&inf->recovery_pending, rid)) {
|
||||
scoutfs_spbm_clear(&inf->recovery_pending, rid);
|
||||
timed_out = true;
|
||||
} else {
|
||||
timed_out = false;
|
||||
}
|
||||
spin_unlock(&inf->lock);
|
||||
|
||||
if (!timed_out)
|
||||
continue;
|
||||
|
||||
scoutfs_err(sb, "client rid %016llx lock recovery timed out",
|
||||
rid);
|
||||
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients, &key);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = scoutfs_server_apply_commit(sb, ret);
|
||||
out:
|
||||
/* force processing all pending lock requests */
|
||||
if (ret == 0)
|
||||
ret = finished_recovery(sb, 0, false);
|
||||
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "lock server saw err %d while timing out clients, shutting down", ret);
|
||||
scoutfs_server_abort(sb);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A client is leaving the lock service. They aren't using locks and
|
||||
* won't send any more requests. We tear down all the state we had for
|
||||
* them. This can be called multiple times for a given client as their
|
||||
* farewell is resent to new servers. It's OK to not find any state.
|
||||
* If we fail to delete a persistent entry then we have to shut down and
|
||||
* hope that the next server has more luck.
|
||||
*/
|
||||
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *tmp;
|
||||
struct server_lock_node *snode;
|
||||
struct scoutfs_key key;
|
||||
@@ -862,20 +744,7 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
bool freed;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&inf->mutex);
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_delete(sb, inf->alloc, inf->wri,
|
||||
&super->lock_clients, &key);
|
||||
mutex_unlock(&inf->mutex);
|
||||
if (ret == -ENOENT) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
scoutfs_key_set_zeros(&key);
|
||||
|
||||
while ((snode = get_server_lock(inf, &key, NULL, true))) {
|
||||
|
||||
freed = false;
|
||||
@@ -884,9 +753,9 @@ int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid)
|
||||
(list == &snode->requested) ? &snode->invalidated :
|
||||
NULL) {
|
||||
|
||||
list_for_each_entry_safe(clent, tmp, list, head) {
|
||||
if (clent->rid == rid) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
list_for_each_entry_safe(c_ent, tmp, list, head) {
|
||||
if (c_ent->rid == rid) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
freed = true;
|
||||
}
|
||||
}
|
||||
@@ -909,7 +778,7 @@ out:
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "lock server err %d during client rid %016llx farewell, shutting down",
|
||||
ret, rid);
|
||||
scoutfs_server_abort(sb);
|
||||
scoutfs_server_stop(sb);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -947,36 +816,35 @@ static char *lock_on_list_string(u8 on_list)
|
||||
static void lock_server_tseq_show(struct seq_file *m,
|
||||
struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
struct client_lock_entry *clent = container_of(ent,
|
||||
struct client_lock_entry *c_ent = container_of(ent,
|
||||
struct client_lock_entry,
|
||||
tseq_entry);
|
||||
struct server_lock_node *snode = clent->snode;
|
||||
struct server_lock_node *snode = c_ent->snode;
|
||||
|
||||
seq_printf(m, SK_FMT" %s %s rid %016llx net_id %llu\n",
|
||||
SK_ARG(&snode->key), lock_mode_string(clent->mode),
|
||||
lock_on_list_string(clent->on_list), clent->rid,
|
||||
clent->net_id);
|
||||
SK_ARG(&snode->key), lock_mode_string(c_ent->mode),
|
||||
lock_on_list_string(c_ent->on_list), c_ent->rid,
|
||||
c_ent->net_id);
|
||||
}
|
||||
|
||||
static void stats_tseq_show(struct seq_file *m, struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
struct server_lock_node *snode = container_of(ent, struct server_lock_node,
|
||||
stats_tseq_entry);
|
||||
|
||||
seq_printf(m, SK_FMT" req %llu inv %llu rsp %llu gr %llu\n",
|
||||
SK_ARG(&snode->key), snode->stats[SLT_REQUEST], snode->stats[SLT_INVALIDATE],
|
||||
snode->stats[SLT_RESPONSE], snode->stats[SLT_GRANT]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup the lock server. This is called before networking can deliver
|
||||
* requests. If we find existing client records then we enter recovery.
|
||||
* Lock request processing is deferred until recovery is resolved for
|
||||
* all the existing clients, either they reconnect and replay locks or
|
||||
* we time them out.
|
||||
* requests.
|
||||
*/
|
||||
int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, u64 max_vers)
|
||||
int scoutfs_lock_server_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct lock_server_info *inf;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
struct scoutfs_key key;
|
||||
unsigned int nr;
|
||||
u64 rid;
|
||||
int ret;
|
||||
|
||||
inf = kzalloc(sizeof(struct lock_server_info), GFP_KERNEL);
|
||||
if (!inf)
|
||||
@@ -984,15 +852,9 @@ int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
|
||||
inf->sb = sb;
|
||||
spin_lock_init(&inf->lock);
|
||||
mutex_init(&inf->mutex);
|
||||
inf->locks_root = RB_ROOT;
|
||||
scoutfs_spbm_init(&inf->recovery_pending);
|
||||
INIT_DELAYED_WORK(&inf->recovery_dwork,
|
||||
scoutfs_lock_server_recovery_timeout);
|
||||
scoutfs_tseq_tree_init(&inf->tseq_tree, lock_server_tseq_show);
|
||||
inf->alloc = alloc;
|
||||
inf->wri = wri;
|
||||
atomic64_set(&inf->write_version, max_vers); /* inc_return gives +1 */
|
||||
scoutfs_tseq_tree_init(&inf->stats_tseq_tree, stats_tseq_show);
|
||||
|
||||
inf->tseq_dentry = scoutfs_tseq_create("server_locks", sbi->debug_root,
|
||||
&inf->tseq_tree);
|
||||
@@ -1001,38 +863,17 @@ int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
inf->stats_tseq_dentry = scoutfs_tseq_create("server_lock_stats", sbi->debug_root,
|
||||
&inf->stats_tseq_tree);
|
||||
if (!inf->stats_tseq_dentry) {
|
||||
debugfs_remove(inf->tseq_dentry);
|
||||
kfree(inf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sbi->lock_server_info = inf;
|
||||
|
||||
/* we enter recovery if there are any client records */
|
||||
nr = 0;
|
||||
for (rid = 0; ; rid++) {
|
||||
init_lock_clients_key(&key, rid);
|
||||
ret = scoutfs_btree_next(sb, &super->lock_clients, &key, &iref);
|
||||
if (ret == -ENOENT)
|
||||
break;
|
||||
if (ret == 0)
|
||||
ret = get_rid_and_put_ref(&iref, &rid);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_spbm_set(&inf->recovery_pending, rid);
|
||||
if (ret)
|
||||
goto out;
|
||||
nr++;
|
||||
|
||||
if (rid == U64_MAX)
|
||||
break;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
if (nr) {
|
||||
schedule_delayed_work(&inf->recovery_dwork,
|
||||
msecs_to_jiffies(LOCK_SERVER_RECOVERY_MS));
|
||||
scoutfs_info(sb, "waiting for %u lock clients to recover", nr);
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1045,14 +886,13 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
DECLARE_LOCK_SERVER_INFO(sb, inf);
|
||||
struct server_lock_node *snode;
|
||||
struct server_lock_node *stmp;
|
||||
struct client_lock_entry *clent;
|
||||
struct client_lock_entry *c_ent;
|
||||
struct client_lock_entry *ctmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
if (inf) {
|
||||
cancel_delayed_work_sync(&inf->recovery_dwork);
|
||||
|
||||
debugfs_remove(inf->tseq_dentry);
|
||||
debugfs_remove(inf->stats_tseq_dentry);
|
||||
|
||||
rbtree_postorder_for_each_entry_safe(snode, stmp,
|
||||
&inf->locks_root, node) {
|
||||
@@ -1062,16 +902,14 @@ void scoutfs_lock_server_destroy(struct super_block *sb)
|
||||
list_splice_init(&snode->invalidated, &list);
|
||||
|
||||
mutex_lock(&snode->mutex);
|
||||
list_for_each_entry_safe(clent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, clent);
|
||||
list_for_each_entry_safe(c_ent, ctmp, &list, head) {
|
||||
free_client_entry(inf, snode, c_ent);
|
||||
}
|
||||
mutex_unlock(&snode->mutex);
|
||||
|
||||
kfree(snode);
|
||||
}
|
||||
|
||||
scoutfs_spbm_destroy(&inf->recovery_pending);
|
||||
|
||||
kfree(inf);
|
||||
sbi->lock_server_info = NULL;
|
||||
}
|
||||
|
||||
@@ -3,17 +3,15 @@
|
||||
|
||||
int scoutfs_lock_server_recover_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock_recover *nlr);
|
||||
int scoutfs_lock_server_finished_recovery(struct super_block *sb);
|
||||
int scoutfs_lock_server_request(struct super_block *sb, u64 rid,
|
||||
u64 net_id, struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid,
|
||||
bool should_exist);
|
||||
int scoutfs_lock_server_greeting(struct super_block *sb, u64 rid);
|
||||
int scoutfs_lock_server_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_lock_server_farewell(struct super_block *sb, u64 rid);
|
||||
|
||||
int scoutfs_lock_server_setup(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri, u64 max_vers);
|
||||
int scoutfs_lock_server_setup(struct super_block *sb);
|
||||
void scoutfs_lock_server_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include <linux/bitops.h>
|
||||
#include "key.h"
|
||||
#include "counters.h"
|
||||
#include "super.h"
|
||||
|
||||
void __printf(4, 5) scoutfs_msg(struct super_block *sb, const char *prefix,
|
||||
const char *str, const char *fmt, ...);
|
||||
@@ -23,6 +24,9 @@ do { \
|
||||
#define scoutfs_info(sb, fmt, args...) \
|
||||
scoutfs_msg_check(sb, KERN_INFO, "", fmt, ##args)
|
||||
|
||||
#define scoutfs_tprintk(sb, fmt, args...) \
|
||||
trace_printk(SCSBF " " fmt "\n", SCSB_ARGS(sb), ##args);
|
||||
|
||||
#define scoutfs_bug_on(sb, cond, fmt, args...) \
|
||||
do { \
|
||||
if (cond) { \
|
||||
|
||||
kmod/src/net.c (296 lines changed)
@@ -30,6 +30,7 @@
|
||||
#include "net.h"
|
||||
#include "endian_swap.h"
|
||||
#include "tseq.h"
|
||||
#include "fence.h"
|
||||
|
||||
/*
|
||||
* scoutfs networking delivers requests and responses between nodes.
|
||||
@@ -330,6 +331,9 @@ static int submit_send(struct super_block *sb,
|
||||
WARN_ON_ONCE(id == 0 && (flags & SCOUTFS_NET_FLAG_RESPONSE)))
|
||||
return -EINVAL;
|
||||
|
||||
if (scoutfs_forcing_unmount(sb))
|
||||
return -EIO;
|
||||
|
||||
msend = kmalloc(offsetof(struct message_send,
|
||||
nh.data[data_len]), GFP_NOFS);
|
||||
if (!msend)
|
||||
@@ -351,6 +355,7 @@ static int submit_send(struct super_block *sb,
|
||||
}
|
||||
if (rid != 0) {
|
||||
spin_unlock(&conn->lock);
|
||||
kfree(msend);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
}
|
||||
@@ -420,6 +425,16 @@ static int process_request(struct scoutfs_net_connection *conn,
|
||||
mrecv->nh.data, le16_to_cpu(mrecv->nh.data_len));
|
||||
}
|
||||
|
||||
static int call_resp_func(struct super_block *sb, struct scoutfs_net_connection *conn,
|
||||
scoutfs_net_response_t resp_func, void *resp_data,
|
||||
void *resp, unsigned int resp_len, int error)
|
||||
{
|
||||
if (resp_func)
|
||||
return resp_func(sb, conn, resp, resp_len, error, resp_data);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* An incoming response finds the queued request and calls its response
|
||||
* function. The response function for a given request will only be
|
||||
@@ -434,7 +449,6 @@ static int process_response(struct scoutfs_net_connection *conn,
|
||||
struct message_send *msend;
|
||||
scoutfs_net_response_t resp_func = NULL;
|
||||
void *resp_data;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
@@ -449,11 +463,8 @@ static int process_response(struct scoutfs_net_connection *conn,
|
||||
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
if (resp_func)
|
||||
ret = resp_func(sb, conn, mrecv->nh.data,
|
||||
le16_to_cpu(mrecv->nh.data_len),
|
||||
net_err_to_host(mrecv->nh.error), resp_data);
|
||||
return ret;
|
||||
return call_resp_func(sb, conn, resp_func, resp_data, mrecv->nh.data,
|
||||
le16_to_cpu(mrecv->nh.data_len), net_err_to_host(mrecv->nh.error));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -491,12 +502,12 @@ static void scoutfs_net_proc_worker(struct work_struct *work)
|
||||
* Free live responses up to and including the seq by marking them dead
|
||||
* and moving them to the send queue to be freed.
|
||||
*/
|
||||
static int move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list, u64 seq)
|
||||
static bool move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
struct list_head *list, u64 seq)
|
||||
{
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
int ret = 0;
|
||||
bool moved = false;
|
||||
|
||||
assert_spin_locked(&conn->lock);
|
||||
|
||||
@@ -508,20 +519,20 @@ static int move_acked_responses(struct scoutfs_net_connection *conn,
|
||||
|
||||
msend->dead = 1;
|
||||
list_move(&msend->head, &conn->send_queue);
|
||||
ret = 1;
|
||||
moved = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return moved;
|
||||
}
|
||||
|
||||
/* acks are processed inline in the recv worker */
|
||||
static void free_acked_responses(struct scoutfs_net_connection *conn, u64 seq)
|
||||
{
|
||||
int moved;
|
||||
bool moved;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
moved = move_acked_responses(conn, &conn->send_queue, seq) +
|
||||
moved = move_acked_responses(conn, &conn->send_queue, seq) |
|
||||
move_acked_responses(conn, &conn->resend_queue, seq);
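	/* bitwise | (not ||) keeps both calls evaluated, so the resend queue
	 * is still scanned even when the send queue already moved entries */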
spin_unlock(&conn->lock);
|
||||
@@ -538,12 +549,16 @@ static int recvmsg_full(struct socket *sock, void *buf, unsigned len)
|
||||
|
||||
while (len) {
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_flags = MSG_NOSIGNAL;
|
||||
kv.iov_base = buf;
|
||||
kv.iov_len = len;
|
||||
|
||||
#ifndef KC_MSGHDR_STRUCT_IOV_ITER
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
#else
|
||||
iov_iter_init(&msg.msg_iter, READ, (struct iovec *)&kv, len, 1);
|
||||
#endif
|
||||
ret = kernel_recvmsg(sock, &msg, &kv, 1, len, msg.msg_flags);
|
||||
if (ret <= 0)
|
||||
return -ECONNABORTED;
|
||||
@@ -619,8 +634,6 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
|
||||
break;
|
||||
}
|
||||
|
||||
trace_scoutfs_recv_clock_sync(nh.clock_sync_id);
|
||||
|
||||
data_len = le16_to_cpu(nh.data_len);
|
||||
|
||||
scoutfs_inc_counter(sb, net_recv_messages);
|
||||
@@ -667,8 +680,15 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
|
||||
|
||||
scoutfs_tseq_add(&ninf->msg_tseq_tree, &mrecv->tseq_entry);
|
||||
|
||||
/* synchronously process greeting before next recvmsg */
|
||||
if (nh.cmd == SCOUTFS_NET_CMD_GREETING)
|
||||
/*
|
||||
* Initial received greetings are processed
|
||||
* synchronously before any other incoming messages.
|
||||
*
|
||||
* Incoming requests or responses to the lock client are
|
||||
* called synchronously to avoid reordering.
|
||||
*/
|
||||
if (nh.cmd == SCOUTFS_NET_CMD_GREETING ||
|
||||
(nh.cmd == SCOUTFS_NET_CMD_LOCK && !conn->listening_conn))
|
||||
scoutfs_net_proc_worker(&mrecv->proc_work);
|
||||
else
|
||||
queue_work(conn->workq, &mrecv->proc_work);
|
||||
@@ -691,12 +711,16 @@ static int sendmsg_full(struct socket *sock, void *buf, unsigned len)
|
||||
|
||||
while (len) {
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_flags = MSG_NOSIGNAL;
|
||||
kv.iov_base = buf;
|
||||
kv.iov_len = len;
|
||||
|
||||
#ifndef KC_MSGHDR_STRUCT_IOV_ITER
|
||||
msg.msg_iov = (struct iovec *)&kv;
|
||||
msg.msg_iovlen = 1;
|
||||
#else
|
||||
iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)&kv, len, 1);
|
||||
#endif
|
||||
ret = kernel_sendmsg(sock, &msg, &kv, 1, len);
|
||||
if (ret <= 0)
|
||||
return -ECONNABORTED;
|
||||
@@ -768,9 +792,6 @@ static void scoutfs_net_send_worker(struct work_struct *work)
|
||||
trace_scoutfs_net_send_message(sb, &conn->sockname,
|
||||
&conn->peername, &msend->nh);
|
||||
|
||||
msend->nh.clock_sync_id = scoutfs_clock_sync_id();
|
||||
trace_scoutfs_send_clock_sync(msend->nh.clock_sync_id);
|
||||
|
||||
ret = sendmsg_full(conn->sock, &msend->nh, len);
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
@@ -823,11 +844,9 @@ static void scoutfs_net_destroy_worker(struct work_struct *work)
|
||||
if (conn->listening_conn && conn->notify_down)
|
||||
conn->notify_down(sb, conn, conn->info, conn->rid);
|
||||
|
||||
/* free all messages, refactor and complete for forced unmount? */
|
||||
list_splice_init(&conn->resend_queue, &conn->send_queue);
|
||||
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head) {
|
||||
list_for_each_entry_safe(msend, tmp, &conn->send_queue, head)
|
||||
free_msend(ninf, msend);
|
||||
}
|
||||
|
||||
/* accepted sockets are removed from their listener's list */
|
||||
if (conn->listening_conn) {
|
||||
@@ -857,74 +876,87 @@ static void destroy_conn(struct scoutfs_net_connection *conn)
|
||||
}
|
||||
|
||||
/*
|
||||
* Have a pretty aggressive keepalive timeout of around 10 seconds. The
|
||||
* TCP keepalives are being processed out of task context so they should
|
||||
* be responsive even when mounts are under load.
|
||||
* By default, TCP would maintain a connection to an unresponsive peer
|
||||
* for a very long time indeed. We can't do that because quorum
|
||||
* members will only participate in an election when they don't have a
|
||||
* healthy connection to a server. We use the KEEPALIVE* and
|
||||
* TCP_USER_TIMEOUT options to ensure that we'll break an unresponsive
|
||||
* connection and return to the quorum and client connection paths to
|
||||
* try and establish a new connection to an active server.
|
||||
*
|
||||
* The TCP_KEEP* and TCP_USER_TIMEOUT option interaction is subtle.
|
||||
* TCP_USER_TIMEOUT only applies if there is unacked written data in the
|
||||
* send queue. It doesn't work if the connection is idle. Adding
|
||||
* keepalive probes with user_timeout set changes how the keepalive
|
||||
* timeout is calculated. CNT no longer matters. Each time
|
||||
* additional probes (not the first) are sent, the user timeout is
|
||||
* checked against the last time data was received. If none of the
|
||||
* keepalives are responded to then eventually the user timeout applies.
|
||||
*
|
||||
* Given all this, we start with the overall unresponsive timeout. Then
|
||||
* we set the probes to start sending towards the end of the timeout.
|
||||
* We give it a few tries for a successful response before the timeout
|
||||
* elapses during the probe timer processing after the unsuccessful
|
||||
* probes.
|
||||
*/
|
||||
#define KEEPCNT 3
|
||||
#define KEEPIDLE 7
|
||||
#define KEEPINTVL 1
|
||||
#define UNRESPONSIVE_TIMEOUT_SECS 10
|
||||
#define UNRESPONSIVE_PROBES 3
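With the constants above, the values derived by the code below work out as follows; this is only a summary of the arithmetic, not new behavior:

/*
 * keepidle     = UNRESPONSIVE_TIMEOUT_SECS - UNRESPONSIVE_PROBES = 7s idle
 * keepintvl    = 1s, so probes go out at roughly 7s, 8s and 9s of silence
 * user_timeout = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC = 10000ms, which
 *                tears down a connection that stays unresponsive for ~10s
 */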
static int sock_opts_and_names(struct scoutfs_net_connection *conn,
|
||||
struct socket *sock)
|
||||
{
|
||||
struct timeval tv;
|
||||
int addrlen;
|
||||
int optval;
|
||||
int ret;
|
||||
|
||||
/* but use a keepalive timeout instead of send timeout */
|
||||
tv.tv_sec = 0;
|
||||
tv.tv_usec = 0;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
/* we use a keepalive timeout instead of send timeout */
|
||||
ret = kc_sock_set_sndtimeo(sock, 0);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = KEEPCNT;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
|
||||
(char *)&optval, sizeof(optval));
|
||||
/* not checked when user_timeout != 0, but for clarity */
|
||||
optval = UNRESPONSIVE_PROBES;
|
||||
ret = kc_sock_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = KEEPIDLE;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
|
||||
(char *)&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = KEEPINTVL;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
|
||||
(char *)&optval, sizeof(optval));
|
||||
BUILD_BUG_ON(UNRESPONSIVE_PROBES >= UNRESPONSIVE_TIMEOUT_SECS);
|
||||
optval = UNRESPONSIVE_TIMEOUT_SECS - (UNRESPONSIVE_PROBES);
|
||||
ret = kc_tcp_sock_set_keepidle(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_tcp_sock_set_keepintvl(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = UNRESPONSIVE_TIMEOUT_SECS * MSEC_PER_SEC;
|
||||
ret = kc_tcp_sock_set_user_timeout(sock, optval);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getsockname(sock, (struct sockaddr *)&conn->sockname,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
ret = kc_tcp_sock_set_nodelay(sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getpeername(sock, (struct sockaddr *)&conn->peername,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
if (ret)
|
||||
ret = kc_kernel_getsockname(sock, (struct sockaddr *)&conn->sockname);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = kc_kernel_getpeername(sock, (struct sockaddr *)&conn->peername);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
|
||||
conn->last_peername = conn->peername;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@@ -944,7 +976,6 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
|
||||
struct scoutfs_net_connection *acc_conn;
|
||||
DECLARE_WAIT_QUEUE_HEAD(waitq);
|
||||
struct socket *acc_sock;
|
||||
LIST_HEAD(conn_list);
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_listen_work_enter(sb, 0, 0);
|
||||
@@ -954,6 +985,8 @@ static void scoutfs_net_listen_worker(struct work_struct *work)
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
acc_sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
/* inherit accepted request funcs from listening conn */
|
||||
acc_conn = scoutfs_net_alloc_conn(sb, conn->notify_up,
|
||||
conn->notify_down,
|
||||
@@ -1007,20 +1040,18 @@ static void scoutfs_net_connect_worker(struct work_struct *work)
|
||||
DEFINE_CONN_FROM_WORK(conn, work, connect_work);
|
||||
struct super_block *sb = conn->sb;
|
||||
struct socket *sock;
|
||||
struct timeval tv;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_connect_work_enter(sb, 0, 0);
|
||||
|
||||
ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* caller specified connect timeout */
|
||||
tv.tv_sec = conn->connect_timeout_ms / MSEC_PER_SEC;
|
||||
tv.tv_usec = (conn->connect_timeout_ms % MSEC_PER_SEC) * USEC_PER_MSEC;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
|
||||
(char *)&tv, sizeof(tv));
|
||||
sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
/* caller specified connect timeout, defaults to 1 sec */
|
||||
ret = kc_sock_set_sndtimeo(sock, conn->connect_timeout_ms / MSEC_PER_SEC);
|
||||
if (ret) {
|
||||
sock_release(sock);
|
||||
goto out;
|
||||
@@ -1089,9 +1120,11 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
struct net_info *ninf = SCOUTFS_SB(sb)->net_info;
|
||||
struct scoutfs_net_connection *listener;
|
||||
struct scoutfs_net_connection *acc_conn;
|
||||
scoutfs_net_response_t resp_func;
|
||||
struct message_send *msend;
|
||||
struct message_send *tmp;
|
||||
unsigned long delay;
|
||||
void *resp_data;
|
||||
|
||||
trace_scoutfs_net_shutdown_work_enter(sb, 0, 0);
|
||||
trace_scoutfs_conn_shutdown_start(conn);
|
||||
@@ -1137,6 +1170,30 @@ static void scoutfs_net_shutdown_worker(struct work_struct *work)
|
||||
/* and wait for accepted conn shutdown work to finish */
|
||||
wait_event(conn->waitq, empty_accepted_list(conn));
|
||||
|
||||
/*
|
||||
* Forced unmount will cause net submit to fail once it's
|
||||
* started and it calls shutdown to interrupt any previous
|
||||
* senders waiting for a response. The response callbacks can
|
||||
* do quite a lot of work so we're careful to call them outside
|
||||
* the lock.
|
||||
*/
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
spin_lock(&conn->lock);
|
||||
list_splice_tail_init(&conn->send_queue, &conn->resend_queue);
|
||||
while ((msend = list_first_entry_or_null(&conn->resend_queue,
|
||||
struct message_send, head))) {
|
||||
resp_func = msend->resp_func;
|
||||
resp_data = msend->resp_data;
|
||||
free_msend(ninf, msend);
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
call_resp_func(sb, conn, resp_func, resp_data, NULL, 0, -ECONNABORTED);
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
}
|
||||
spin_unlock(&conn->lock);
|
||||
}
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
/* greetings aren't resent across sockets */
|
||||
@@ -1206,6 +1263,7 @@ static void scoutfs_net_reconn_free_worker(struct work_struct *work)
|
||||
unsigned long now = jiffies;
|
||||
unsigned long deadline = 0;
|
||||
bool requeue = false;
|
||||
int ret;
|
||||
|
||||
trace_scoutfs_net_reconn_free_work_enter(sb, 0, 0);
|
||||
|
||||
@@ -1219,10 +1277,18 @@ restart:
|
||||
time_after_eq(now, acc->reconn_deadline))) {
|
||||
set_conn_fl(acc, reconn_freeing);
|
||||
spin_unlock(&conn->lock);
|
||||
if (!test_conn_fl(conn, shutting_down))
|
||||
scoutfs_info(sb, "client timed out "SIN_FMT" -> "SIN_FMT", can not reconnect",
|
||||
SIN_ARG(&acc->sockname),
|
||||
SIN_ARG(&acc->peername));
|
||||
if (!test_conn_fl(conn, shutting_down)) {
|
||||
scoutfs_info(sb, "client "SIN_FMT" reconnect timed out, fencing",
|
||||
SIN_ARG(&acc->last_peername));
|
||||
ret = scoutfs_fence_start(sb, acc->rid,
|
||||
acc->last_peername.sin_addr.s_addr,
|
||||
SCOUTFS_FENCE_CLIENT_RECONNECT);
|
||||
if (ret) {
|
||||
scoutfs_err(sb, "client fence returned err %d, shutting down server",
|
||||
ret);
|
||||
scoutfs_server_stop(sb);
|
||||
}
|
||||
}
|
||||
destroy_conn(acc);
|
||||
goto restart;
|
||||
}
|
||||
@@ -1269,10 +1335,12 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
if (!conn)
|
||||
return NULL;
|
||||
|
||||
conn->info = kzalloc(info_size, GFP_NOFS);
|
||||
if (!conn->info) {
|
||||
kfree(conn);
|
||||
return NULL;
|
||||
if (info_size) {
|
||||
conn->info = kzalloc(info_size, GFP_NOFS);
|
||||
if (!conn->info) {
|
||||
kfree(conn);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
conn->workq = alloc_workqueue("scoutfs_net_%s",
|
||||
@@ -1293,6 +1361,7 @@ scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
init_waitqueue_head(&conn->waitq);
|
||||
conn->sockname.sin_family = AF_INET;
|
||||
conn->peername.sin_family = AF_INET;
|
||||
conn->last_peername.sin_family = AF_INET;
|
||||
INIT_LIST_HEAD(&conn->accepted_head);
|
||||
INIT_LIST_HEAD(&conn->accepted_list);
|
||||
conn->next_send_seq = 1;
|
||||
@@ -1373,13 +1442,15 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
if (WARN_ON_ONCE(conn->sock))
|
||||
return -EINVAL;
|
||||
|
||||
ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
ret = kc_sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sock->sk->sk_allocation = GFP_NOFS;
|
||||
|
||||
optval = 1;
|
||||
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
|
||||
(char *)&optval, sizeof(optval));
|
||||
ret = kc_sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
|
||||
&optval, sizeof(optval));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -1389,20 +1460,18 @@ int scoutfs_net_bind(struct super_block *sb,
|
||||
goto out;
|
||||
|
||||
ret = kernel_listen(sock, 255);
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
addrlen = sizeof(struct sockaddr_in);
|
||||
ret = kernel_getsockname(sock, (struct sockaddr *)&conn->sockname,
|
||||
&addrlen);
|
||||
if (ret == 0 && addrlen != sizeof(struct sockaddr_in))
|
||||
ret = -EAFNOSUPPORT;
|
||||
if (ret)
|
||||
ret = kc_kernel_getsockname(sock, (struct sockaddr *)&conn->sockname);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
|
||||
conn->sock = sock;
|
||||
*sin = conn->sockname;
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
if (ret < 0 && sock)
|
||||
sock_release(sock);
|
||||
@@ -1459,8 +1528,7 @@ int scoutfs_net_connect(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
struct sockaddr_in *sin, unsigned long timeout_ms)
|
||||
{
|
||||
int error = 0;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
conn->connect_sin = *sin;
|
||||
@@ -1468,10 +1536,8 @@ int scoutfs_net_connect(struct super_block *sb,
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
queue_work(conn->workq, &conn->connect_work);
|
||||
|
||||
ret = wait_event_interruptible(conn->waitq,
|
||||
connect_result(conn, &error));
|
||||
return ret ?: error;
|
||||
wait_event(conn->waitq, connect_result(conn, &ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void set_valid_greeting(struct scoutfs_net_connection *conn)
|
||||
@@ -1607,10 +1673,10 @@ restart:
|
||||
conn->next_send_id = reconn->next_send_id;
|
||||
atomic64_set(&conn->recv_seq, atomic64_read(&reconn->recv_seq));
|
||||
|
||||
/* greeting response/ack will be on conn send queue */
|
||||
/* reconn should be idle while in reconn_wait */
|
||||
BUG_ON(!list_empty(&reconn->send_queue));
|
||||
BUG_ON(!list_empty(&conn->resend_queue));
|
||||
list_splice_init(&reconn->resend_queue, &conn->resend_queue);
|
||||
/* queued greeting response is racing, can be in send or resend queue */
|
||||
list_splice_tail_init(&reconn->resend_queue, &conn->resend_queue);
|
||||
|
||||
/* new conn info is unused, swap, old won't call down */
|
||||
swap(conn->info, reconn->info);
|
||||
@@ -1702,23 +1768,6 @@ int scoutfs_net_response_node(struct super_block *sb,
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* The response function that was submitted with the request is not
|
||||
* called if the request is canceled here.
|
||||
*/
|
||||
void scoutfs_net_cancel_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id)
|
||||
{
|
||||
struct message_send *msend;
|
||||
|
||||
spin_lock(&conn->lock);
|
||||
msend = find_request(conn, cmd, id);
|
||||
if (msend)
|
||||
complete_send(conn, msend);
|
||||
spin_unlock(&conn->lock);
|
||||
}
|
||||
|
||||
struct sync_request_completion {
|
||||
struct completion comp;
|
||||
void *resp;
|
||||
@@ -1774,11 +1823,10 @@ int scoutfs_net_sync_request(struct super_block *sb,
|
||||
ret = scoutfs_net_submit_request(sb, conn, cmd, arg, arg_len,
|
||||
sync_response, &sreq, &id);
|
||||
|
||||
ret = wait_for_completion_interruptible(&sreq.comp);
|
||||
if (ret == -ERESTARTSYS)
|
||||
scoutfs_net_cancel_request(sb, conn, cmd, id);
|
||||
else
|
||||
if (ret == 0) {
|
||||
wait_for_completion(&sreq.comp);
|
||||
ret = sreq.error;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -49,6 +49,7 @@ struct scoutfs_net_connection {
|
||||
u64 greeting_id;
|
||||
struct sockaddr_in sockname;
|
||||
struct sockaddr_in peername;
|
||||
struct sockaddr_in last_peername;
|
||||
|
||||
struct list_head accepted_head;
|
||||
struct scoutfs_net_connection *listening_conn;
|
||||
@@ -99,6 +100,16 @@ static inline void scoutfs_addr_to_sin(struct sockaddr_in *sin,
|
||||
sin->sin_port = cpu_to_be16(le16_to_cpu(addr->v4.port));
|
||||
}
|
||||
|
||||
static inline void scoutfs_sin_to_addr(union scoutfs_inet_addr *addr, struct sockaddr_in *sin)
|
||||
{
|
||||
BUG_ON(sin->sin_family != AF_INET);
|
||||
|
||||
memset(addr, 0, sizeof(union scoutfs_inet_addr));
|
||||
addr->v4.family = cpu_to_le16(SCOUTFS_AF_IPV4);
|
||||
addr->v4.addr = be32_to_le32(sin->sin_addr.s_addr);
|
||||
addr->v4.port = be16_to_le16(sin->sin_port);
|
||||
}
|
||||
|
||||
struct scoutfs_net_connection *
|
||||
scoutfs_net_alloc_conn(struct super_block *sb,
|
||||
scoutfs_net_notify_t notify_up,
|
||||
@@ -123,9 +134,6 @@ int scoutfs_net_submit_request_node(struct super_block *sb,
|
||||
u64 rid, u8 cmd, void *arg, u16 arg_len,
|
||||
scoutfs_net_response_t resp_func,
|
||||
void *resp_data, u64 *id_ret);
|
||||
void scoutfs_net_cancel_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, u64 id);
|
||||
int scoutfs_net_sync_request(struct super_block *sb,
|
||||
struct scoutfs_net_connection *conn,
|
||||
u8 cmd, void *arg, unsigned arg_len,
|
||||
|
||||
kmod/src/omap.c (new file, 889 lines)
@@ -0,0 +1,889 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "counters.h"
|
||||
#include "cmp.h"
|
||||
#include "inode.h"
|
||||
#include "client.h"
|
||||
#include "server.h"
|
||||
#include "omap.h"
|
||||
#include "recov.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
/*
|
||||
* As a client removes an inode from its cache with an nlink of 0 it
|
||||
* needs to decide if it is the last client using the inode and should
|
||||
* fully delete all the inode's items. It needs to know if other mounts
|
||||
* still have the inode in use.
|
||||
*
|
||||
* We need a way to communicate between mounts that an inode is in use.
|
||||
* We don't want to pay the synchronous per-file locking round trip
|
||||
* costs associated with per-inode open locks that you'd typically see
|
||||
* in systems to solve this problem. The first prototypes of this
|
||||
* tracked open file handles so this was coined the open map, though it
|
||||
* now tracks cached inodes.
|
||||
*
|
||||
* Clients maintain bitmaps that cover groups of inodes. As inodes
|
||||
* enter the cache their bit is set and as the inode is evicted the bit
|
||||
* is cleared. As deletion is attempted, either by scanning orphans or
|
||||
* evicting an inode with an nlink of 0, messages are sent around the
|
||||
* cluster to get the current bitmaps for that inode's group from all
|
||||
* active mounts. If the inode's bit is clear then it can be deleted.
|
||||
*
|
||||
* This layer maintains a list of client rids to send messages to. The
|
||||
* server calls us as clients enter and leave the cluster. We can't
|
||||
* process requests until all clients are present as a server starts up
|
||||
* so we hook into recovery and delay processing until all previously
|
||||
* existing clients are recovered or fenced.
|
||||
*/
|
||||
|
||||
struct omap_rid_list {
|
||||
int nr_rids;
|
||||
struct list_head head;
|
||||
};
|
||||
|
||||
struct omap_rid_entry {
|
||||
struct list_head head;
|
||||
u64 rid;
|
||||
};
|
||||
|
||||
struct omap_info {
|
||||
/* client */
|
||||
struct rhashtable group_ht;
|
||||
|
||||
/* server */
|
||||
struct rhashtable req_ht;
|
||||
struct llist_head requests;
|
||||
spinlock_t lock;
|
||||
struct omap_rid_list rids;
|
||||
atomic64_t next_req_id;
|
||||
};
|
||||
|
||||
#define DECLARE_OMAP_INFO(sb, name) \
|
||||
struct omap_info *name = SCOUTFS_SB(sb)->omap_info
|
||||
|
||||
/*
|
||||
* The presence of an inode in the inode cache sets its bit in the lock
|
||||
* group's bitmap.
|
||||
*
|
||||
* We don't want to add additional global synchronization of inode cache
|
||||
* maintenance so these are tracked in an rcu hash table. Once their
|
||||
* total reaches zero they're removed from the hash and queued for
|
||||
* freeing and readers should ignore them.
|
||||
*/
|
||||
struct omap_group {
|
||||
struct super_block *sb;
|
||||
struct rhash_head ht_head;
|
||||
struct rcu_head rcu;
|
||||
u64 nr;
|
||||
spinlock_t lock;
|
||||
unsigned int total;
|
||||
__le64 bits[SCOUTFS_OPEN_INO_MAP_LE64S];
|
||||
};
|
||||
|
||||
#define trace_group(sb, which, group, bit_nr) \
|
||||
do { \
|
||||
__typeof__(group) _grp = (group); \
|
||||
__typeof__(bit_nr) _nr = (bit_nr); \
|
||||
\
|
||||
trace_scoutfs_omap_group_##which(sb, _grp, _grp->nr, _grp->total, _nr); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Each request is initialized with the rids of currently mounted
|
||||
* clients. As each responds we remove their rid and send the response
|
||||
* once everyone has contributed.
|
||||
*
|
||||
* The request frequency will typically be low, but in a mass rm -rf
|
||||
* load we will see O(groups * clients) messages flying around.
|
||||
*/
|
||||
struct omap_request {
|
||||
struct llist_node llnode;
|
||||
struct rhash_head ht_head;
|
||||
struct rcu_head rcu;
|
||||
spinlock_t lock;
|
||||
u64 client_rid;
|
||||
u64 client_id;
|
||||
struct omap_rid_list rids;
|
||||
struct scoutfs_open_ino_map map;
|
||||
};
|
||||
|
||||
static inline void init_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
INIT_LIST_HEAD(&list->head);
|
||||
list->nr_rids = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Negative searches almost never happen.
|
||||
*/
|
||||
static struct omap_rid_entry *find_rid(struct omap_rid_list *list, u64 rid)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
|
||||
list_for_each_entry(entry, &list->head, head) {
|
||||
if (rid == entry->rid)
|
||||
return entry;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int free_rid(struct omap_rid_list *list, struct omap_rid_entry *entry)
|
||||
{
|
||||
int nr;
|
||||
|
||||
list_del(&entry->head);
|
||||
nr = --list->nr_rids;
|
||||
|
||||
kfree(entry);
|
||||
return nr;
|
||||
}
|
||||
|
||||
static void free_rid_list(struct omap_rid_list *list)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &list->head, head)
|
||||
free_rid(list, entry);
|
||||
}
|
||||
|
||||
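/*
 * Resize the destination list to match the source count, dropping the
 * source lock around allocations and retrying until the counts agree,
 * then copy the rids over while holding the lock.
 */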
static int copy_rids(struct omap_rid_list *to, struct omap_rid_list *from, spinlock_t *from_lock)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *src;
|
||||
struct omap_rid_entry *dst;
|
||||
int nr;
|
||||
|
||||
spin_lock(from_lock);
|
||||
|
||||
while (to->nr_rids != from->nr_rids) {
|
||||
nr = from->nr_rids;
|
||||
spin_unlock(from_lock);
|
||||
|
||||
while (to->nr_rids < nr) {
|
||||
entry = kmalloc(sizeof(struct omap_rid_entry), GFP_NOFS);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
list_add_tail(&entry->head, &to->head);
|
||||
to->nr_rids++;
|
||||
}
|
||||
|
||||
while (to->nr_rids > nr) {
|
||||
entry = list_first_entry(&to->head, struct omap_rid_entry, head);
|
||||
list_del(&entry->head);
|
||||
kfree(entry);
|
||||
to->nr_rids--;
|
||||
}
|
||||
|
||||
spin_lock(from_lock);
|
||||
}
|
||||
|
||||
dst = list_first_entry(&to->head, struct omap_rid_entry, head);
|
||||
list_for_each_entry(src, &from->head, head) {
|
||||
dst->rid = src->rid;
|
||||
dst = list_next_entry(dst, head);
|
||||
}
|
||||
|
||||
spin_unlock(from_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_rids(struct omap_rid_list *list)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &list->head, head) {
|
||||
list_del(&entry->head);
|
||||
kfree(entry);
|
||||
}
|
||||
}
|
||||
|
||||
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr)
|
||||
{
|
||||
*group_nr = ino >> SCOUTFS_OPEN_INO_MAP_SHIFT;
|
||||
*bit_nr = ino & SCOUTFS_OPEN_INO_MAP_MASK;
|
||||
}
|
||||
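A worked example of the mapping above, for illustration only: the real SCOUTFS_OPEN_INO_MAP_SHIFT and SCOUTFS_OPEN_INO_MAP_MASK values live in format.h and are assumed here to be 10 and (1 << 10) - 1.

/* standalone sketch, not part of the patch; the SHIFT value is a guess */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_OPEN_INO_MAP_SHIFT 10
#define EXAMPLE_OPEN_INO_MAP_MASK ((1ULL << EXAMPLE_OPEN_INO_MAP_SHIFT) - 1)

int main(void)
{
	uint64_t ino = 123456;
	uint64_t group_nr = ino >> EXAMPLE_OPEN_INO_MAP_SHIFT;
	int bit_nr = (int)(ino & EXAMPLE_OPEN_INO_MAP_MASK);

	/* 123456 >> 10 == 120, 123456 & 1023 == 576 */
	printf("ino %llu -> group %llu bit %d\n",
	       (unsigned long long)ino, (unsigned long long)group_nr, bit_nr);
	return 0;
}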
|
||||
static struct omap_group *alloc_group(struct super_block *sb, u64 group_nr)
|
||||
{
|
||||
struct omap_group *group;
|
||||
|
||||
group = kzalloc(sizeof(struct omap_group), GFP_NOFS);
|
||||
if (group) {
|
||||
group->sb = sb;
|
||||
group->nr = group_nr;
|
||||
spin_lock_init(&group->lock);
|
||||
|
||||
trace_group(sb, alloc, group, -1);
|
||||
}
|
||||
|
||||
return group;
|
||||
}
|
||||
|
||||
static void free_group(struct super_block *sb, struct omap_group *group)
|
||||
{
|
||||
trace_group(sb, free, group, -1);
|
||||
kfree(group);
|
||||
}
|
||||
|
||||
static void free_group_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct omap_group *group = container_of(rcu, struct omap_group, rcu);
|
||||
|
||||
free_group(group->sb, group);
|
||||
}
|
||||
|
||||
static const struct rhashtable_params group_ht_params = {
|
||||
.key_len = member_sizeof(struct omap_group, nr),
|
||||
.key_offset = offsetof(struct omap_group, nr),
|
||||
.head_offset = offsetof(struct omap_group, ht_head),
|
||||
};
|
||||
|
||||
/*
|
||||
* Track a cached inode in its group. Our set can be racing with a
|
||||
* final clear that removes the group from the hash, sets total to
|
||||
* UINT_MAX, and calls rcu free. We can retry until the dead group is
|
||||
* no longer visible in the hash table and we can insert a new allocated
|
||||
* group.
|
||||
*
|
||||
* The caller must ensure that the bit is clear, -EEXIST will be
|
||||
* returned otherwise.
|
||||
*/
|
||||
int scoutfs_omap_set(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
bool found;
|
||||
int ret = 0;
|
||||
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
retry:
|
||||
found = false;
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
if (group->total < UINT_MAX) {
|
||||
found = true;
|
||||
if (WARN_ON_ONCE(test_and_set_bit_le(bit_nr, group->bits)))
|
||||
ret = -EEXIST;
|
||||
else
|
||||
group->total++;
|
||||
}
|
||||
trace_group(sb, inc, group, bit_nr);
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!found) {
|
||||
group = alloc_group(sb, group_nr);
|
||||
if (group) {
|
||||
ret = rhashtable_lookup_insert_fast(&ominf->group_ht, &group->ht_head,
|
||||
group_ht_params);
|
||||
if (ret < 0)
|
||||
free_group(sb, group);
|
||||
if (ret == -EEXIST)
|
||||
ret = 0;
|
||||
if (ret == -EBUSY) {
|
||||
/* wait for rehash to finish */
|
||||
synchronize_rcu();
|
||||
ret = 0;
|
||||
}
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
} else {
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool scoutfs_omap_test(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
bool ret = false;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
ret = !!test_bit_le(bit_nr, group->bits);
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear a previously set ino bit. Trying to clear a bit that's already
|
||||
* clear implies imbalanced set/clear or bugs freeing groups. We only
|
||||
* free groups here as the last clear drops the group's total to 0.
|
||||
*/
|
||||
void scoutfs_omap_clear(struct super_block *sb, u64 ino)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_group *group;
|
||||
u64 group_nr;
|
||||
int bit_nr;
|
||||
|
||||
scoutfs_omap_calc_group_nrs(ino, &group_nr, &bit_nr);
|
||||
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
WARN_ON_ONCE(!test_bit_le(bit_nr, group->bits));
|
||||
WARN_ON_ONCE(group->total == 0);
|
||||
WARN_ON_ONCE(group->total == UINT_MAX);
|
||||
if (test_and_clear_bit_le(bit_nr, group->bits)) {
|
||||
if (--group->total == 0) {
|
||||
group->total = UINT_MAX;
|
||||
rhashtable_remove_fast(&ominf->group_ht, &group->ht_head,
|
||||
group_ht_params);
|
||||
call_rcu(&group->rcu, free_group_rcu);
|
||||
}
|
||||
}
|
||||
trace_group(sb, dec, group, bit_nr);
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
WARN_ON_ONCE(!group);
|
||||
}
|
||||
|
||||
/*
|
||||
* The server adds rids as it discovers clients. We add them to the
|
||||
* list of rids to send map requests to.
|
||||
*/
|
||||
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_rid_entry *entry;
|
||||
struct omap_rid_entry *found;
|
||||
|
||||
entry = kmalloc(sizeof(struct omap_rid_entry), GFP_NOFS);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
found = find_rid(&ominf->rids, rid);
|
||||
if (!found) {
|
||||
entry->rid = rid;
|
||||
list_add_tail(&entry->head, &ominf->rids.head);
|
||||
ominf->rids.nr_rids++;
|
||||
}
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
if (found)
|
||||
kfree(entry);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_req(struct omap_request *req)
|
||||
{
|
||||
free_rids(&req->rids);
|
||||
kfree(req);
|
||||
}
|
||||
|
||||
static void free_req_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct omap_request *req = container_of(rcu, struct omap_request, rcu);
|
||||
|
||||
free_req(req);
|
||||
}
|
||||
|
||||
static const struct rhashtable_params req_ht_params = {
|
||||
.key_len = member_sizeof(struct omap_request, map.args.req_id),
|
||||
.key_offset = offsetof(struct omap_request, map.args.req_id),
|
||||
.head_offset = offsetof(struct omap_request, ht_head),
|
||||
};
|
||||
|
||||
/*
|
||||
* Remove a rid from all the pending requests. If it's the last rid we
|
||||
* give the caller the details to send a response, they'll call back to
|
||||
* keep removing. If their send fails they're going to shut down the
|
||||
* server so we can queue freeing the request as we give it to them.
|
||||
*/
|
||||
static int remove_rid_from_reqs(struct omap_info *ominf, u64 rid, u64 *resp_rid, u64 *resp_id,
|
||||
struct scoutfs_open_ino_map *map)
|
||||
{
|
||||
struct omap_rid_entry *entry;
|
||||
struct rhashtable_iter iter;
|
||||
struct omap_request *req;
|
||||
int ret = 0;
|
||||
|
||||
rhashtable_walk_enter(&ominf->req_ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
|
||||
for (;;) {
|
||||
req = rhashtable_walk_next(&iter);
|
||||
if (req == NULL)
|
||||
break;
|
||||
if (req == ERR_PTR(-EAGAIN))
|
||||
continue;
|
||||
|
||||
spin_lock(&req->lock);
|
||||
entry = find_rid(&req->rids, rid);
|
||||
if (entry && free_rid(&req->rids, entry) == 0) {
|
||||
*resp_rid = req->client_rid;
|
||||
*resp_id = req->client_id;
|
||||
memcpy(map, &req->map, sizeof(struct scoutfs_open_ino_map));
|
||||
rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
|
||||
call_rcu(&req->rcu, free_req_rcu);
|
||||
ret = 1;
|
||||
}
|
||||
spin_unlock(&req->lock);
|
||||
if (ret > 0)
|
||||
break;
|
||||
}
|
||||
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
|
||||
if (ret <= 0) {
|
||||
*resp_rid = 0;
|
||||
*resp_id = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* A client has been evicted. Remove its rid from the list and walk
|
||||
* through all the pending requests and remove its rids, sending the
|
||||
* response if it was the last rid waiting for a response.
|
||||
*
|
||||
* If this returns an error then the server will shut down.
|
||||
*
|
||||
* This can be called multiple times by different servers if there are
|
||||
* errors reclaiming an evicted mount, so we allow asking to remove a
|
||||
* rid that hasn't been added.
|
||||
*/
|
||||
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct scoutfs_open_ino_map *map = NULL;
|
||||
struct omap_rid_entry *entry;
|
||||
u64 resp_rid = 0;
|
||||
u64 resp_id = 0;
|
||||
int ret;
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
entry = find_rid(&ominf->rids, rid);
|
||||
if (entry)
|
||||
free_rid(&ominf->rids, entry);
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
if (!entry) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
|
||||
if (!map) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* remove the rid from all pending requests, sending responses if it was final */
|
||||
for (;;) {
|
||||
ret = remove_rid_from_reqs(ominf, rid, &resp_rid, &resp_id, map);
|
||||
if (ret <= 0)
|
||||
break;
|
||||
ret = scoutfs_server_send_omap_response(sb, resp_rid, resp_id, map, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(map);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle a single incoming request in the server. This could have been
|
||||
* delayed by recovery. This only returns an error if we couldn't send
|
||||
* a processing error response to the client.
|
||||
*/
|
||||
static int handle_request(struct super_block *sb, struct omap_request *req)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_rid_list priv_rids;
|
||||
struct omap_rid_entry *entry;
|
||||
int ret;
|
||||
|
||||
init_rid_list(&priv_rids);
|
||||
|
||||
ret = copy_rids(&priv_rids, &ominf->rids, &ominf->lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* don't send a request to the client who originated this request */
|
||||
entry = find_rid(&priv_rids, req->client_rid);
|
||||
if (entry && free_rid(&priv_rids, entry) == 0) {
|
||||
ret = scoutfs_server_send_omap_response(sb, req->client_rid, req->client_id,
|
||||
&req->map, 0);
|
||||
kfree(req);
|
||||
req = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* this lock isn't needed but sparse gave warnings with conditional locking */
|
||||
ret = copy_rids(&req->rids, &priv_rids, &ominf->lock);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
do {
|
||||
ret = rhashtable_insert_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
|
||||
if (ret == -EBUSY)
|
||||
synchronize_rcu(); /* wait for rehash to finish */
|
||||
} while (ret == -EBUSY);
|
||||
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* We can start getting responses the moment we send the first request. After
|
||||
* we send the last request the req can be freed.
|
||||
*/
|
||||
while ((entry = list_first_entry_or_null(&priv_rids.head, struct omap_rid_entry, head))) {
|
||||
ret = scoutfs_server_send_omap_request(sb, entry->rid, &req->map.args);
|
||||
if (ret < 0) {
|
||||
rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
|
||||
goto out;
|
||||
}
|
||||
|
||||
free_rid(&priv_rids, entry);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
free_rids(&priv_rids);
|
||||
if (ret < 0) {
|
||||
ret = scoutfs_server_send_omap_response(sb, req->client_rid, req->client_id,
|
||||
NULL, ret);
|
||||
free_req(req);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle all previously received omap requests from clients. Once
|
||||
* we've finished recovery and can send requests to all clients we can
|
||||
* handle all pending requests. The handling function frees the request
|
||||
* and only returns an error if it couldn't send a response to the
|
||||
* client.
|
||||
*/
|
||||
static int handle_requests(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct llist_node *requests;
|
||||
struct omap_request *req;
|
||||
struct omap_request *tmp;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
if (scoutfs_recov_next_pending(sb, 0, SCOUTFS_RECOV_GREETING))
|
||||
return 0;
|
||||
|
||||
ret = 0;
|
||||
requests = llist_del_all(&ominf->requests);
|
||||
|
||||
llist_for_each_entry_safe(req, tmp, requests, llnode) {
|
||||
err = handle_request(sb, req);
|
||||
if (err < 0 && ret == 0)
|
||||
ret = err;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_omap_finished_recovery(struct super_block *sb)
|
||||
{
|
||||
return handle_requests(sb);
|
||||
}
|
||||
|
||||
/*
|
||||
* The server is receiving a request from a client for the bitmap of all
|
||||
* open inodes around their ino. Queue it for processing which is
|
||||
* typically immediate and inline but which can be deferred by recovery
|
||||
* as the server first starts up.
|
||||
*/
|
||||
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct omap_request *req;
|
||||
|
||||
req = kzalloc(sizeof(struct omap_request), GFP_NOFS);
|
||||
if (req == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&req->lock);
|
||||
req->client_rid = rid;
|
||||
req->client_id = id;
|
||||
init_rid_list(&req->rids);
|
||||
req->map.args.group_nr = args->group_nr;
|
||||
req->map.args.req_id = cpu_to_le64(atomic64_inc_return(&ominf->next_req_id));
|
||||
|
||||
llist_add(&req->llnode, &ominf->requests);
|
||||
|
||||
return handle_requests(sb);
|
||||
}
|
||||
|
||||
/*
|
||||
* The client is receiving a request from the server for its map for the
|
||||
* given group. Look up the group and copy the bits to the map.
|
||||
*
|
||||
* The mount originating the request for this bitmap has the inode group
|
||||
* write locked. We can't be adding links to any inodes in the group
|
||||
* because that requires the lock. Inode bits can be set and cleared
|
||||
* while we're sampling the bitmap. These races are fine, they can't be
|
||||
* adding cached inodes if nlink is 0 and we don't have the lock. If
|
||||
* the caller is removing a set bit then they're about to try and delete
|
||||
* the inode themselves and will first have to acquire the cluster lock
|
||||
* themselves.
|
||||
*/
|
||||
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
u64 group_nr = le64_to_cpu(args->group_nr);
|
||||
struct scoutfs_open_ino_map *map;
|
||||
struct omap_group *group;
|
||||
bool copied = false;
|
||||
int ret;
|
||||
|
||||
map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
|
||||
if (!map)
|
||||
return -ENOMEM;
|
||||
|
||||
map->args = *args;
|
||||
|
||||
rcu_read_lock();
|
||||
group = rhashtable_lookup(&ominf->group_ht, &group_nr, group_ht_params);
|
||||
if (group) {
|
||||
spin_lock(&group->lock);
|
||||
trace_group(sb, request, group, -1);
|
||||
if (group->total > 0 && group->total < UINT_MAX) {
|
||||
memcpy(map->bits, group->bits, sizeof(map->bits));
|
||||
copied = true;
|
||||
}
|
||||
spin_unlock(&group->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!copied)
|
||||
memset(map->bits, 0, sizeof(map->bits));
|
||||
|
||||
ret = scoutfs_client_send_omap_response(sb, id, map);
|
||||
kfree(map);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The server has received an open ino map response from a client. Find
|
||||
* the original request that it's serving, merge in the response's map, and
|
||||
* send a reply if this was the last response from a client we were
|
||||
* waiting for.
|
||||
*
|
||||
* We can get responses for requests we're no longer tracking if, for
|
||||
* example, sending to a client gets an error. We'll have already sent
|
||||
* the response to the requesting client so we drop these responses on
|
||||
* the floor.
|
||||
*/
|
||||
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_open_ino_map *resp_map)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct scoutfs_open_ino_map *map;
|
||||
struct omap_rid_entry *entry;
|
||||
bool send_response = false;
|
||||
struct omap_request *req;
|
||||
u64 resp_rid;
|
||||
u64 resp_id;
|
||||
int ret;
|
||||
|
||||
map = kmalloc(sizeof(struct scoutfs_open_ino_map), GFP_NOFS);
|
||||
if (!map) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
req = rhashtable_lookup(&ominf->req_ht, &resp_map->args.req_id, req_ht_params);
|
||||
if (req) {
|
||||
spin_lock(&req->lock);
|
||||
entry = find_rid(&req->rids, rid);
|
||||
if (entry) {
|
||||
bitmap_or((unsigned long *)req->map.bits, (unsigned long *)req->map.bits,
|
||||
(unsigned long *)resp_map->bits, SCOUTFS_OPEN_INO_MAP_BITS);
|
||||
if (free_rid(&req->rids, entry) == 0)
|
||||
send_response = true;
|
||||
}
|
||||
spin_unlock(&req->lock);
|
||||
|
||||
if (send_response) {
|
||||
resp_rid = req->client_rid;
|
||||
resp_id = req->client_id;
|
||||
memcpy(map, &req->map, sizeof(struct scoutfs_open_ino_map));
|
||||
rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
|
||||
call_rcu(&req->rcu, free_req_rcu);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (send_response)
|
||||
ret = scoutfs_server_send_omap_response(sb, resp_rid, resp_id, map, 0);
|
||||
else
|
||||
ret = 0;
|
||||
kfree(map);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The server is shutting down. Free all the server state associated
|
||||
* with ongoing request processing. Clients who still have requests
|
||||
* pending will resend them to the next server.
|
||||
*/
|
||||
void scoutfs_omap_server_shutdown(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct rhashtable_iter iter;
|
||||
struct llist_node *requests;
|
||||
struct omap_request *req;
|
||||
struct omap_request *tmp;
|
||||
|
||||
rhashtable_walk_enter(&ominf->req_ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
|
||||
for (;;) {
|
||||
req = rhashtable_walk_next(&iter);
|
||||
if (req == NULL)
|
||||
break;
|
||||
if (req == ERR_PTR(-EAGAIN))
|
||||
continue;
|
||||
|
||||
if (req->rids.nr_rids != 0) {
|
||||
free_rids(&req->rids);
|
||||
rhashtable_remove_fast(&ominf->req_ht, &req->ht_head, req_ht_params);
|
||||
call_rcu(&req->rcu, free_req_rcu);
|
||||
}
|
||||
}
|
||||
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
|
||||
requests = llist_del_all(&ominf->requests);
|
||||
llist_for_each_entry_safe(req, tmp, requests, llnode)
|
||||
kfree(req);
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
free_rid_list(&ominf->rids);
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
int scoutfs_omap_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct omap_info *ominf;
|
||||
int ret;
|
||||
|
||||
ominf = kzalloc(sizeof(struct omap_info), GFP_KERNEL);
|
||||
if (!ominf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = rhashtable_init(&ominf->group_ht, &group_ht_params);
|
||||
if (ret < 0) {
|
||||
kfree(ominf);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = rhashtable_init(&ominf->req_ht, &req_ht_params);
|
||||
if (ret < 0) {
|
||||
rhashtable_destroy(&ominf->group_ht);
|
||||
kfree(ominf);
|
||||
goto out;
|
||||
}
|
||||
|
||||
init_llist_head(&ominf->requests);
|
||||
spin_lock_init(&ominf->lock);
|
||||
init_rid_list(&ominf->rids);
|
||||
atomic64_set(&ominf->next_req_id, 0);
|
||||
|
||||
sbi->omap_info = ominf;
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* To get here the server must have shut down, freeing requests, and
|
||||
* evict must have been called on all cached inodes so we can just
|
||||
* synchronize all the pending group frees.
|
||||
*/
|
||||
void scoutfs_omap_destroy(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OMAP_INFO(sb, ominf);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct rhashtable_iter iter;
|
||||
|
||||
if (ominf) {
|
||||
synchronize_rcu();
|
||||
|
||||
/* double check that all the groups decremented to 0 and were freed */
|
||||
rhashtable_walk_enter(&ominf->group_ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
WARN_ON_ONCE(rhashtable_walk_peek(&iter) != NULL);
|
||||
rhashtable_walk_stop(&iter);
|
||||
rhashtable_walk_exit(&iter);
|
||||
|
||||
spin_lock(&ominf->lock);
|
||||
free_rid_list(&ominf->rids);
|
||||
spin_unlock(&ominf->lock);
|
||||
|
||||
rhashtable_destroy(&ominf->group_ht);
|
||||
rhashtable_destroy(&ominf->req_ht);
|
||||
kfree(ominf);
|
||||
sbi->omap_info = NULL;
|
||||
}
|
||||
}
|
||||
kmod/src/omap.h (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
#ifndef _SCOUTFS_OMAP_H_
|
||||
#define _SCOUTFS_OMAP_H_
|
||||
|
||||
int scoutfs_omap_set(struct super_block *sb, u64 ino);
|
||||
bool scoutfs_omap_test(struct super_block *sb, u64 ino);
|
||||
void scoutfs_omap_clear(struct super_block *sb, u64 ino);
|
||||
int scoutfs_omap_client_handle_request(struct super_block *sb, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args);
|
||||
void scoutfs_omap_calc_group_nrs(u64 ino, u64 *group_nr, int *bit_nr);
|
||||
|
||||
int scoutfs_omap_add_rid(struct super_block *sb, u64 rid);
|
||||
int scoutfs_omap_remove_rid(struct super_block *sb, u64 rid);
|
||||
int scoutfs_omap_finished_recovery(struct super_block *sb);
|
||||
int scoutfs_omap_server_handle_request(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_open_ino_map_args *args);
|
||||
int scoutfs_omap_server_handle_response(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_open_ino_map *resp_map);
|
||||
void scoutfs_omap_server_shutdown(struct super_block *sb);
|
||||
|
||||
int scoutfs_omap_setup(struct super_block *sb);
|
||||
void scoutfs_omap_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
@@ -26,22 +26,43 @@
|
||||
#include "msg.h"
|
||||
#include "options.h"
|
||||
#include "super.h"
|
||||
#include "inode.h"
|
||||
#include "alloc.h"
|
||||
|
||||
enum {
|
||||
Opt_acl,
|
||||
Opt_data_prealloc_blocks,
|
||||
Opt_data_prealloc_contig_only,
|
||||
Opt_log_merge_wait_timeout_ms,
|
||||
Opt_metadev_path,
|
||||
Opt_noacl,
|
||||
Opt_orphan_scan_delay_ms,
|
||||
Opt_quorum_heartbeat_timeout_ms,
|
||||
Opt_quorum_slot_nr,
|
||||
Opt_err,
|
||||
};
|
||||
|
||||
static const match_table_t tokens = {
|
||||
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
|
||||
{Opt_acl, "acl"},
|
||||
{Opt_data_prealloc_blocks, "data_prealloc_blocks=%s"},
|
||||
{Opt_data_prealloc_contig_only, "data_prealloc_contig_only=%s"},
|
||||
{Opt_log_merge_wait_timeout_ms, "log_merge_wait_timeout_ms=%s"},
|
||||
{Opt_metadev_path, "metadev_path=%s"},
|
||||
{Opt_noacl, "noacl"},
|
||||
{Opt_orphan_scan_delay_ms, "orphan_scan_delay_ms=%s"},
|
||||
{Opt_quorum_heartbeat_timeout_ms, "quorum_heartbeat_timeout_ms=%s"},
|
||||
{Opt_quorum_slot_nr, "quorum_slot_nr=%s"},
|
||||
{Opt_err, NULL}
|
||||
};
|
||||
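For reference, a mount invocation exercising these options might look like the following, with placeholder device paths: mount -t scoutfs -o metadev_path=/dev/vg/scoutfs-meta,quorum_slot_nr=0,orphan_scan_delay_ms=10000 /dev/vg/scoutfs-data /mnt/scoutfs. Only metadev_path is required; the rest fall back to the defaults set below.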
|
||||
struct options_sb_info {
|
||||
struct dentry *debugfs_dir;
|
||||
struct options_info {
|
||||
seqlock_t seqlock;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct scoutfs_sysfs_attrs sysfs_attrs;
|
||||
};
|
||||
|
||||
u32 scoutfs_option_u32(struct super_block *sb, int token)
|
||||
{
|
||||
WARN_ON_ONCE(1);
|
||||
return 0;
|
||||
}
|
||||
#define DECLARE_OPTIONS_INFO(sb, name) \
|
||||
struct options_info *name = SCOUTFS_SB(sb)->options_info
|
||||
|
||||
static int parse_bdev_path(struct super_block *sb, substring_t *substr,
|
||||
char **bdev_path_ret)
|
||||
@@ -89,58 +110,185 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
struct mount_options *parsed)
|
||||
static void free_options(struct scoutfs_mount_options *opts)
|
||||
{
|
||||
kfree(opts->metadev_path);
|
||||
}
|
||||
|
||||
#define MIN_LOG_MERGE_WAIT_TIMEOUT_MS 100UL
|
||||
#define DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS 500
|
||||
#define MAX_LOG_MERGE_WAIT_TIMEOUT_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
#define MIN_ORPHAN_SCAN_DELAY_MS 100UL
|
||||
#define DEFAULT_ORPHAN_SCAN_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
#define MAX_ORPHAN_SCAN_DELAY_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
#define MIN_DATA_PREALLOC_BLOCKS 1ULL
|
||||
#define MAX_DATA_PREALLOC_BLOCKS ((unsigned long long)SCOUTFS_BLOCK_SM_MAX)
|
||||
|
||||
static void init_default_options(struct scoutfs_mount_options *opts)
|
||||
{
|
||||
memset(opts, 0, sizeof(*opts));
|
||||
|
||||
opts->data_prealloc_blocks = SCOUTFS_DATA_PREALLOC_DEFAULT_BLOCKS;
|
||||
opts->data_prealloc_contig_only = 1;
|
||||
opts->log_merge_wait_timeout_ms = DEFAULT_LOG_MERGE_WAIT_TIMEOUT_MS;
|
||||
opts->orphan_scan_delay_ms = -1;
|
||||
opts->quorum_heartbeat_timeout_ms = SCOUTFS_QUORUM_DEF_HB_TIMEO_MS;
|
||||
opts->quorum_slot_nr = -1;
|
||||
}
|
||||
|
||||
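/*
 * These verify helpers take both the parse result and the value so the
 * same range checks can be shared by mount option parsing and by the
 * sysfs store handlers further below.
 */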
static int verify_log_merge_wait_timeout_ms(struct super_block *sb, int ret, int val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse log_merge_wait_timeout_ms value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val < MIN_LOG_MERGE_WAIT_TIMEOUT_MS || val > MAX_LOG_MERGE_WAIT_TIMEOUT_MS) {
|
||||
scoutfs_err(sb, "invalid log_merge_wait_timeout_ms value %d, must be between %lu and %lu",
|
||||
val, MIN_LOG_MERGE_WAIT_TIMEOUT_MS, MAX_LOG_MERGE_WAIT_TIMEOUT_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int verify_quorum_heartbeat_timeout_ms(struct super_block *sb, int ret, u64 val)
|
||||
{
|
||||
if (ret < 0) {
|
||||
scoutfs_err(sb, "failed to parse quorum_heartbeat_timeout_ms value");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val < SCOUTFS_QUORUM_MIN_HB_TIMEO_MS || val > SCOUTFS_QUORUM_MAX_HB_TIMEO_MS) {
|
||||
scoutfs_err(sb, "invalid quorum_heartbeat_timeout_ms value %llu, must be between %lu and %lu",
|
||||
val, SCOUTFS_QUORUM_MIN_HB_TIMEO_MS, SCOUTFS_QUORUM_MAX_HB_TIMEO_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse the option string into our options struct. This can allocate
|
||||
* memory in the struct. The caller is responsible for always calling
|
||||
* free_options() when the struct is destroyed, including when we return
|
||||
* an error.
|
||||
*/
|
||||
static int parse_options(struct super_block *sb, char *options, struct scoutfs_mount_options *opts)
|
||||
{
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
u64 nr64;
|
||||
int nr;
|
||||
int token;
|
||||
char *p;
|
||||
int ret;
|
||||
|
||||
/* Set defaults */
|
||||
memset(parsed, 0, sizeof(*parsed));
|
||||
parsed->quorum_slot_nr = -1;
|
||||
|
||||
while ((p = strsep(&options, ",")) != NULL) {
|
||||
if (!*p)
|
||||
continue;
|
||||
|
||||
token = match_token(p, tokens, args);
|
||||
switch (token) {
|
||||
case Opt_quorum_slot_nr:
|
||||
|
||||
if (parsed->quorum_slot_nr != -1) {
|
||||
case Opt_acl:
|
||||
sb->s_flags |= SB_POSIXACL;
|
||||
break;
|
||||
|
||||
case Opt_data_prealloc_blocks:
|
||||
ret = match_u64(args, &nr64);
|
||||
if (ret < 0 ||
|
||||
nr64 < MIN_DATA_PREALLOC_BLOCKS || nr64 > MAX_DATA_PREALLOC_BLOCKS) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
|
||||
MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->data_prealloc_blocks = nr64;
|
||||
break;
|
||||
|
||||
case Opt_data_prealloc_contig_only:
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 || nr > 1) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must only be 0 or 1");
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->data_prealloc_contig_only = nr;
|
||||
break;
|
||||
|
||||
case Opt_log_merge_wait_timeout_ms:
|
||||
ret = match_int(args, &nr);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, nr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->log_merge_wait_timeout_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_metadev_path:
|
||||
ret = parse_bdev_path(sb, &args[0], &opts->metadev_path);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case Opt_noacl:
|
||||
sb->s_flags &= ~SB_POSIXACL;
|
||||
break;
|
||||
|
||||
case Opt_orphan_scan_delay_ms:
|
||||
if (opts->orphan_scan_delay_ms != -1) {
|
||||
scoutfs_err(sb, "multiple orphan_scan_delay_ms options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 ||
|
||||
nr < MIN_ORPHAN_SCAN_DELAY_MS || nr > MAX_ORPHAN_SCAN_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid orphan_scan_delay_ms option, must be between %lu and %lu",
|
||||
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
opts->orphan_scan_delay_ms = nr;
|
||||
break;
|
||||
|
||||
case Opt_quorum_heartbeat_timeout_ms:
|
||||
ret = match_u64(args, &nr64);
|
||||
ret = verify_quorum_heartbeat_timeout_ms(sb, ret, nr64);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
opts->quorum_heartbeat_timeout_ms = nr64;
|
||||
break;
|
||||
|
||||
case Opt_quorum_slot_nr:
|
||||
if (opts->quorum_slot_nr != -1) {
|
||||
scoutfs_err(sb, "multiple quorum_slot_nr options provided, only provide one.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = match_int(args, &nr);
|
||||
if (ret < 0 || nr < 0 ||
|
||||
nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
if (ret < 0 || nr < 0 || nr >= SCOUTFS_QUORUM_MAX_SLOTS) {
|
||||
scoutfs_err(sb, "invalid quorum_slot_nr option, must be between 0 and %u",
|
||||
SCOUTFS_QUORUM_MAX_SLOTS - 1);
|
||||
if (ret == 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
parsed->quorum_slot_nr = nr;
|
||||
opts->quorum_slot_nr = nr;
|
||||
break;
|
||||
case Opt_metadev_path:
|
||||
|
||||
ret = parse_bdev_path(sb, &args[0],
|
||||
&parsed->metadev_path);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
default:
|
||||
scoutfs_err(sb, "Unknown or malformed option, \"%s\"",
|
||||
p);
|
||||
break;
|
||||
scoutfs_err(sb, "Unknown or malformed option, \"%s\"", p);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!parsed->metadev_path) {
|
||||
if (opts->orphan_scan_delay_ms == -1)
|
||||
opts->orphan_scan_delay_ms = DEFAULT_ORPHAN_SCAN_DELAY_MS;
|
||||
|
||||
if (!opts->metadev_path) {
|
||||
scoutfs_err(sb, "Required mount option \"metadev_path\" not found");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -148,40 +296,343 @@ int scoutfs_parse_options(struct super_block *sb, char *options,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int scoutfs_options_setup(struct super_block *sb)
|
||||
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
unsigned int seq;
|
||||
|
||||
if (WARN_ON_ONCE(optinf == NULL)) {
|
||||
/* trying to use options before early setup or after destroy */
|
||||
init_default_options(opts);
|
||||
return;
|
||||
}
|
||||
|
||||
do {
|
||||
seq = read_seqbegin(&optinf->seqlock);
|
||||
memcpy(opts, &optinf->opts, sizeof(struct scoutfs_mount_options));
|
||||
} while (read_seqretry(&optinf->seqlock, seq));
|
||||
}
|
||||
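For context, a minimal sketch of how another subsystem would sample a live option value through scoutfs_options_read(); the helper name here is hypothetical and not part of the patch.

/* sketch only: example_orphan_scan_jiffies() is an illustrative caller */
static unsigned long example_orphan_scan_jiffies(struct super_block *sb)
{
	struct scoutfs_mount_options opts;

	/* take a consistent snapshot of the options under the seqlock */
	scoutfs_options_read(sb, &opts);

	return msecs_to_jiffies(opts.orphan_scan_delay_ms);
}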
|
||||
/*
|
||||
* Early setup that parses and stores the options so that the rest of
|
||||
* setup can use them. Full options setup that relies on other
|
||||
* components will be done later.
|
||||
*/
|
||||
int scoutfs_options_early_setup(struct super_block *sb, char *options)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct options_sb_info *osi;
|
||||
struct scoutfs_mount_options opts;
|
||||
struct options_info *optinf;
|
||||
int ret;
|
||||
|
||||
osi = kzalloc(sizeof(struct options_sb_info), GFP_KERNEL);
|
||||
if (!osi)
|
||||
return -ENOMEM;
|
||||
init_default_options(&opts);
|
||||
|
||||
sbi->options = osi;
|
||||
ret = parse_options(sb, options, &opts);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
osi->debugfs_dir = debugfs_create_dir("options", sbi->debug_root);
|
||||
if (!osi->debugfs_dir) {
|
||||
optinf = kzalloc(sizeof(struct options_info), GFP_KERNEL);
|
||||
if (!optinf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
seqlock_init(&optinf->seqlock);
|
||||
scoutfs_sysfs_init_attrs(sb, &optinf->sysfs_attrs);
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts = opts;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
sbi->options_info = optinf;
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
free_options(&opts);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int scoutfs_options_show(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
struct super_block *sb = root->d_sb;
|
||||
struct scoutfs_mount_options opts;
|
||||
const bool is_acl = !!(sb->s_flags & SB_POSIXACL);
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
if (is_acl)
|
||||
seq_puts(seq, ",acl");
|
||||
seq_printf(seq, ",data_prealloc_blocks=%llu", opts.data_prealloc_blocks);
|
||||
seq_printf(seq, ",data_prealloc_contig_only=%u", opts.data_prealloc_contig_only);
|
||||
seq_printf(seq, ",metadev_path=%s", opts.metadev_path);
|
||||
if (!is_acl)
|
||||
seq_puts(seq, ",noacl");
|
||||
seq_printf(seq, ",orphan_scan_delay_ms=%u", opts.orphan_scan_delay_ms);
|
||||
if (opts.quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts.quorum_slot_nr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t data_prealloc_blocks_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", opts.data_prealloc_blocks);
|
||||
}
|
||||
static ssize_t data_prealloc_blocks_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_DATA_PREALLOC_BLOCKS || val > MAX_DATA_PREALLOC_BLOCKS) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_blocks option, must be between %llu and %llu",
|
||||
MIN_DATA_PREALLOC_BLOCKS, MAX_DATA_PREALLOC_BLOCKS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.data_prealloc_blocks = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(data_prealloc_blocks);
|
||||
|
||||
static ssize_t data_prealloc_contig_only_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.data_prealloc_contig_only);
|
||||
}
|
||||
static ssize_t data_prealloc_contig_only_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[20]; /* more than enough for octal -U32_MAX */
|
||||
long val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtol(nullterm, 0, &val);
|
||||
if (ret < 0 || val < 0 || val > 1) {
|
||||
scoutfs_err(sb, "invalid data_prealloc_contig_only option, bool must be 0 or 1");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.data_prealloc_contig_only = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(data_prealloc_contig_only);
|
||||
|
||||
static ssize_t log_merge_wait_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.log_merge_wait_timeout_ms);
|
||||
}
|
||||
static ssize_t log_merge_wait_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
int val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoint(nullterm, 0, &val);
|
||||
ret = verify_log_merge_wait_timeout_ms(sb, ret, val);
|
||||
if (ret == 0) {
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.log_merge_wait_timeout_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
ret = count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(log_merge_wait_timeout_ms);
|
||||
|
||||
static ssize_t metadev_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", opts.metadev_path);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(metadev_path);
|
||||
|
||||
static ssize_t orphan_scan_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", opts.orphan_scan_delay_ms);
|
||||
}
|
||||
static ssize_t orphan_scan_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[20]; /* more than enough for octal -U32_MAX */
|
||||
long val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtol(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_ORPHAN_SCAN_DELAY_MS || val > MAX_ORPHAN_SCAN_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid orphan_scan_delay_ms value written to options sysfs file, must be between %lu and %lu",
|
||||
MIN_ORPHAN_SCAN_DELAY_MS, MAX_ORPHAN_SCAN_DELAY_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.orphan_scan_delay_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
|
||||
scoutfs_inode_schedule_orphan_dwork(sb);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(orphan_scan_delay_ms);
|
||||
|
||||
static ssize_t quorum_heartbeat_timeout_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", opts.quorum_heartbeat_timeout_ms);
|
||||
}
|
||||
static ssize_t quorum_heartbeat_timeout_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
ret = verify_quorum_heartbeat_timeout_ms(sb, ret, val);
|
||||
if (ret == 0) {
|
||||
write_seqlock(&optinf->seqlock);
|
||||
optinf->opts.quorum_heartbeat_timeout_ms = val;
|
||||
write_sequnlock(&optinf->seqlock);
|
||||
ret = count;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(quorum_heartbeat_timeout_ms);
|
||||
|
||||
static ssize_t quorum_slot_nr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct scoutfs_mount_options opts;
|
||||
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", opts.quorum_slot_nr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(quorum_slot_nr);
|
||||
|
||||
static struct attribute *options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_blocks),
|
||||
SCOUTFS_ATTR_PTR(data_prealloc_contig_only),
|
||||
SCOUTFS_ATTR_PTR(log_merge_wait_timeout_ms),
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(orphan_scan_delay_ms),
|
||||
SCOUTFS_ATTR_PTR(quorum_heartbeat_timeout_ms),
|
||||
SCOUTFS_ATTR_PTR(quorum_slot_nr),
|
||||
NULL,
|
||||
};
|
||||
|
||||
int scoutfs_options_setup(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs(sb, &optinf->sysfs_attrs, options_attrs, "mount_options");
|
||||
if (ret < 0)
|
||||
scoutfs_options_destroy(sb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* We remove the sysfs files early in unmount so that they can't try to call other subsystems
|
||||
* as they're being destroyed.
|
||||
*/
|
||||
void scoutfs_options_stop(struct super_block *sb)
|
||||
{
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
|
||||
if (optinf)
|
||||
scoutfs_sysfs_destroy_attrs(sb, &optinf->sysfs_attrs);
|
||||
}
|
||||
|
||||
void scoutfs_options_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct options_sb_info *osi = sbi->options;
|
||||
DECLARE_OPTIONS_INFO(sb, optinf);
|
||||
|
||||
if (osi) {
|
||||
if (osi->debugfs_dir)
|
||||
debugfs_remove_recursive(osi->debugfs_dir);
|
||||
kfree(osi);
|
||||
sbi->options = NULL;
|
||||
scoutfs_options_stop(sb);
|
||||
|
||||
if (optinf) {
|
||||
free_options(&optinf->opts);
|
||||
kfree(optinf);
|
||||
sbi->options_info = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,23 +5,22 @@
#include <linux/in.h>
#include "format.h"

enum scoutfs_mount_options {
Opt_quorum_slot_nr,
Opt_metadev_path,
Opt_err,
};

struct mount_options {
int quorum_slot_nr;
struct scoutfs_mount_options {
u64 data_prealloc_blocks;
bool data_prealloc_contig_only;
unsigned int log_merge_wait_timeout_ms;
char *metadev_path;
unsigned int orphan_scan_delay_ms;
int quorum_slot_nr;
u64 quorum_heartbeat_timeout_ms;
};

int scoutfs_parse_options(struct super_block *sb, char *options,
struct mount_options *parsed);
void scoutfs_options_read(struct super_block *sb, struct scoutfs_mount_options *opts);
int scoutfs_options_show(struct seq_file *seq, struct dentry *root);

int scoutfs_options_early_setup(struct super_block *sb, char *options);
int scoutfs_options_setup(struct super_block *sb);
void scoutfs_options_stop(struct super_block *sb);
void scoutfs_options_destroy(struct super_block *sb);

u32 scoutfs_option_u32(struct super_block *sb, int token);
#define scoutfs_option_bool scoutfs_option_u32

#endif /* _SCOUTFS_OPTIONS_H_ */

File diff suppressed because it is too large
@@ -2,12 +2,14 @@
#define _SCOUTFS_QUORUM_H_

int scoutfs_quorum_server_sin(struct super_block *sb, struct sockaddr_in *sin);
void scoutfs_quorum_server_shutdown(struct super_block *sb);

u8 scoutfs_quorum_votes_needed(struct super_block *sb);
void scoutfs_quorum_slot_sin(struct scoutfs_super_block *super, int i,
void scoutfs_quorum_slot_sin(struct scoutfs_quorum_config *qconf, int i,
struct sockaddr_in *sin);

int scoutfs_quorum_fence_leaders(struct super_block *sb, struct scoutfs_quorum_config *qconf,
u64 term);

int scoutfs_quorum_setup(struct super_block *sb);
void scoutfs_quorum_shutdown(struct super_block *sb);
void scoutfs_quorum_destroy(struct super_block *sb);

1266 kmod/src/quota.c (new file, diff suppressed because it is too large)
48 kmod/src/quota.h (new file)
@@ -0,0 +1,48 @@
#ifndef _SCOUTFS_QUOTA_H_
#define _SCOUTFS_QUOTA_H_

#include "ioctl.h"

/*
* Each rule's name can be in the ruleset's rbtree associated with the
* source attr that it selects.  This lets checks only test rules that
* the inputs could match.  The 'i' field indicates which name is in the
* tree so we can find the containing rule.
*
* This is mostly private to quota.c but we expose it for tracing.
*/
struct squota_rule {
u64 limit;
u8 prio;
u8 op;
u8 rule_flags;
struct squota_rule_name {
struct rb_node node;
u64 val;
u8 source;
u8 flags;
u8 i;
} names[3];
};

/* private to quota.c, only here for tracing */
struct squota_input {
u64 attrs[SQ_NS__NR_SELECT];
u8 op;
};

int scoutfs_quota_check_inode(struct super_block *sb, struct inode *dir);
int scoutfs_quota_check_data(struct super_block *sb, struct inode *inode);

int scoutfs_quota_get_rules(struct super_block *sb, u64 *iterator,
struct scoutfs_ioctl_quota_rule *irules, int nr);
int scoutfs_quota_mod_rule(struct super_block *sb, bool is_add,
struct scoutfs_ioctl_quota_rule *irule);

void scoutfs_quota_get_lock_range(struct scoutfs_key *start, struct scoutfs_key *end);
void scoutfs_quota_invalidate(struct super_block *sb);

int scoutfs_quota_setup(struct super_block *sb);
void scoutfs_quota_destroy(struct super_block *sb);

#endif
305 kmod/src/recov.c (new file)
@@ -0,0 +1,305 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/list_sort.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "recov.h"
|
||||
#include "cmp.h"
|
||||
|
||||
/*
|
||||
* There are a few server messages which can't be processed until they
|
||||
* know that they have state for all possibly active clients. These
|
||||
* little helpers track which clients have recovered what state and give
|
||||
* those message handlers a call to check if recovery has completed. We
|
||||
* track the timeout here, but all we do is call back into the server to
|
||||
* take steps to evict timed out clients and then let us know that their
|
||||
* recovery has finished.
|
||||
*/
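/*
 * Illustrative sketch only, not taken from server.c: one way a caller
 * could drive these helpers.  example_timeout_fn and the 30 second
 * timeout are hypothetical.
 */
#if 0
static void example_timeout_fn(struct super_block *sb)
{
	/* evict clients that are still pending, then call _finish for them */
}

static int example_track_recovery(struct super_block *sb, u64 *rids, int nr)
{
	int ret;
	int i;

	/* record every client we expect to hear from again */
	for (i = 0; i < nr; i++) {
		ret = scoutfs_recov_prepare(sb, rids[i], SCOUTFS_RECOV_GREETING);
		if (ret < 0)
			return ret;
	}

	/* arm the timeout; > 0 means everyone already finished */
	ret = scoutfs_recov_begin(sb, example_timeout_fn, 30 * MSEC_PER_SEC);
	if (ret > 0)
		return 0;

	/* later, as each client greets us, call scoutfs_recov_finish() */
	return 0;
}
#endif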
|
||||
|
||||
struct recov_info {
|
||||
struct super_block *sb;
|
||||
spinlock_t lock;
|
||||
struct list_head pending;
|
||||
struct timer_list timer;
|
||||
void (*timeout_fn)(struct super_block *);
|
||||
};
|
||||
|
||||
#define DECLARE_RECOV_INFO(sb, name) \
|
||||
struct recov_info *name = SCOUTFS_SB(sb)->recov_info
|
||||
|
||||
struct recov_pending {
|
||||
struct list_head head;
|
||||
u64 rid;
|
||||
int which;
|
||||
};
|
||||
|
||||
static struct recov_pending *next_pending(struct recov_info *recinf, u64 rid, int which)
|
||||
{
|
||||
struct recov_pending *pend;
|
||||
|
||||
list_for_each_entry(pend, &recinf->pending, head) {
|
||||
if (pend->rid > rid && pend->which & which)
|
||||
return pend;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct recov_pending *lookup_pending(struct recov_info *recinf, u64 rid, int which)
|
||||
{
|
||||
struct recov_pending *pend;
|
||||
|
||||
pend = next_pending(recinf, rid - 1, which);
|
||||
if (pend && pend->rid == rid)
|
||||
return pend;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* We keep the pending list sorted by rid so that we can iterate over
|
||||
* them. The list should be small and shouldn't be used often.
|
||||
*/
|
||||
static int cmp_pending_rid(void *priv, KC_LIST_CMP_CONST struct list_head *A, KC_LIST_CMP_CONST struct list_head *B)
|
||||
{
|
||||
KC_LIST_CMP_CONST struct recov_pending *a = list_entry(A, KC_LIST_CMP_CONST struct recov_pending, head);
|
||||
KC_LIST_CMP_CONST struct recov_pending *b = list_entry(B, KC_LIST_CMP_CONST struct recov_pending, head);
|
||||
|
||||
return scoutfs_cmp_u64s(a->rid, b->rid);
|
||||
}
|
||||
|
||||
/*
|
||||
* Record that we'll be waiting for a client to recover something.
|
||||
* _finished will eventually be called for every _prepare, either
|
||||
* because recovery naturally finished or because it timed out and the
|
||||
* server evicted the client.
|
||||
*/
|
||||
int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
struct recov_pending *alloc;
|
||||
struct recov_pending *pend;
|
||||
|
||||
if (WARN_ON_ONCE(which & SCOUTFS_RECOV_INVALID))
|
||||
return -EINVAL;
|
||||
|
||||
alloc = kmalloc(sizeof(*pend), GFP_NOFS);
|
||||
if (!alloc)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
|
||||
pend = lookup_pending(recinf, rid, SCOUTFS_RECOV_ALL);
|
||||
if (pend) {
|
||||
pend->which |= which;
|
||||
} else {
|
||||
swap(pend, alloc);
|
||||
pend->rid = rid;
|
||||
pend->which = which;
|
||||
list_add_tail(&pend->head, &recinf->pending);
|
||||
list_sort(NULL, &recinf->pending, cmp_pending_rid);
|
||||
}
|
||||
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
kfree(alloc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Recovery is only finished once we've begun (which sets the timer) and
|
||||
* all clients have finished. If we didn't test the timer we could
|
||||
* claim it finished prematurely as clients are being prepared.
|
||||
*/
|
||||
static int recov_finished(struct recov_info *recinf)
|
||||
{
|
||||
return !!(recinf->timeout_fn != NULL && list_empty(&recinf->pending));
|
||||
}
|
||||
|
||||
static void timer_callback(struct timer_list *timer)
|
||||
{
|
||||
struct recov_info *recinf = from_timer(recinf, timer, timer);
|
||||
|
||||
recinf->timeout_fn(recinf->sb);
|
||||
}
|
||||
|
||||
/*
|
||||
* Begin waiting for recovery once we've prepared all the clients. If
|
||||
* the timeout period elapses before _finish is called on all prepared
|
||||
* clients then the timer will call the callback.
|
||||
*
|
||||
* Returns > 0 if all the prepared clients finish recovery before begin
|
||||
* is called.
|
||||
*/
|
||||
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
|
||||
unsigned int timeout_ms)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
int ret;
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
|
||||
recinf->timeout_fn = timeout_fn;
|
||||
recinf->timer.expires = jiffies + msecs_to_jiffies(timeout_ms);
|
||||
add_timer(&recinf->timer);
|
||||
|
||||
ret = recov_finished(recinf);
|
||||
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
if (ret > 0)
|
||||
del_timer_sync(&recinf->timer);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* A given client has recovered the given state. If it's finished all
|
||||
* recovery then we free it, and if all clients have finished recovery
|
||||
* then we cancel the timeout timer.
|
||||
*
|
||||
* Returns > 0 if _begin has been called and all clients have finished.
|
||||
* The caller will only see > 0 returned once.
|
||||
*/
|
||||
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
struct recov_pending *pend;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
|
||||
pend = lookup_pending(recinf, rid, which);
|
||||
if (pend) {
|
||||
pend->which &= ~which;
|
||||
if (pend->which) {
|
||||
pend = NULL;
|
||||
} else {
|
||||
list_del(&pend->head);
|
||||
ret = recov_finished(recinf);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
if (ret > 0)
|
||||
del_timer_sync(&recinf->timer);
|
||||
|
||||
kfree(pend);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the given client is still trying to recover
|
||||
* the given state.
|
||||
*/
|
||||
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
bool is_pending;
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
is_pending = lookup_pending(recinf, rid, which) != NULL;
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
return is_pending;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the next rid after the given rid of a client waiting for the
|
||||
* given state to be recovered. Start with rid 0, returns 0 when there
|
||||
* are no more clients waiting for recovery.
|
||||
*
|
||||
* This is inherently racey. Callers are responsible for resolving any
|
||||
* actions taken based on pending with the recovery finishing, perhaps
|
||||
* before we return.
|
||||
*/
|
||||
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
struct recov_pending *pend;
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
pend = next_pending(recinf, rid, which);
|
||||
rid = pend ? pend->rid : 0;
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
return rid;
|
||||
}
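/*
 * Illustrative sketch only: the iteration idiom described above, starting
 * from rid 0 and stopping when no pending clients remain.
 */
#if 0
static void example_walk_pending(struct super_block *sb)
{
	u64 rid = 0;

	while ((rid = scoutfs_recov_next_pending(sb, rid, SCOUTFS_RECOV_ALL)) != 0) {
		/* act on the still-pending client rid here, e.g. fence it */
	}
}
#endif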
|
||||
|
||||
/*
|
||||
* The server is shutting down and doesn't need to worry about recovery
|
||||
* anymore. It'll be built up again by the next server, if needed.
|
||||
*/
|
||||
void scoutfs_recov_shutdown(struct super_block *sb)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
struct recov_pending *pend;
|
||||
struct recov_pending *tmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
del_timer_sync(&recinf->timer);
|
||||
|
||||
spin_lock(&recinf->lock);
|
||||
list_splice_init(&recinf->pending, &list);
|
||||
recinf->timeout_fn = NULL;
|
||||
spin_unlock(&recinf->lock);
|
||||
|
||||
list_for_each_entry_safe(pend, tmp, &list, head) {
|
||||
list_del(&pend->head);
|
||||
kfree(pend);
|
||||
}
|
||||
}
|
||||
|
||||
int scoutfs_recov_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct recov_info *recinf;
|
||||
int ret;
|
||||
|
||||
recinf = kzalloc(sizeof(struct recov_info), GFP_KERNEL);
|
||||
if (!recinf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
recinf->sb = sb;
|
||||
spin_lock_init(&recinf->lock);
|
||||
INIT_LIST_HEAD(&recinf->pending);
|
||||
timer_setup(&recinf->timer, timer_callback, 0);
|
||||
|
||||
sbi->recov_info = recinf;
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_recov_destroy(struct super_block *sb)
|
||||
{
|
||||
DECLARE_RECOV_INFO(sb, recinf);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (recinf) {
|
||||
scoutfs_recov_shutdown(sb);
|
||||
|
||||
kfree(recinf);
|
||||
sbi->recov_info = NULL;
|
||||
}
|
||||
}
|
||||
23 kmod/src/recov.h (new file)
@@ -0,0 +1,23 @@
#ifndef _SCOUTFS_RECOV_H_
#define _SCOUTFS_RECOV_H_

enum {
SCOUTFS_RECOV_GREETING = ( 1 << 0),
SCOUTFS_RECOV_LOCKS = ( 1 << 1),

SCOUTFS_RECOV_INVALID = (~0 << 2),
SCOUTFS_RECOV_ALL = (~SCOUTFS_RECOV_INVALID),
};
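/* with the two bits above, _INVALID works out to ~0x3 and _ALL to 0x3 (GREETING | LOCKS) */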

int scoutfs_recov_prepare(struct super_block *sb, u64 rid, int which);
int scoutfs_recov_begin(struct super_block *sb, void (*timeout_fn)(struct super_block *),
unsigned int timeout_ms);
int scoutfs_recov_finish(struct super_block *sb, u64 rid, int which);
bool scoutfs_recov_is_pending(struct super_block *sb, u64 rid, int which);
u64 scoutfs_recov_next_pending(struct super_block *sb, u64 rid, int which);
void scoutfs_recov_shutdown(struct super_block *sb);

int scoutfs_recov_setup(struct super_block *sb);
void scoutfs_recov_destroy(struct super_block *sb);

#endif
File diff suppressed because it is too large
3721 kmod/src/server.c (diff suppressed because it is too large)
@@ -56,22 +56,31 @@ do { \
|
||||
__entry->name##_data_len, __entry->name##_cmd, __entry->name##_flags, \
|
||||
__entry->name##_error
|
||||
|
||||
u64 scoutfs_server_reserved_meta_blocks(struct super_block *sb);
|
||||
|
||||
int scoutfs_server_lock_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_server_lock_response(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_net_lock_grant_response *gr);
|
||||
struct scoutfs_net_lock *nl);
|
||||
int scoutfs_server_lock_recover_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_key *key);
|
||||
void scoutfs_server_get_roots(struct super_block *sb,
|
||||
struct scoutfs_net_roots *roots);
|
||||
int scoutfs_server_hold_commit(struct super_block *sb);
|
||||
int scoutfs_server_apply_commit(struct super_block *sb, int err);
|
||||
void scoutfs_server_recov_finish(struct super_block *sb, u64 rid, int which);
|
||||
|
||||
struct sockaddr_in;
|
||||
struct scoutfs_quorum_elected_info;
|
||||
int scoutfs_server_start(struct super_block *sb, u64 term);
|
||||
void scoutfs_server_abort(struct super_block *sb);
|
||||
int scoutfs_server_send_omap_request(struct super_block *sb, u64 rid,
|
||||
struct scoutfs_open_ino_map_args *args);
|
||||
int scoutfs_server_send_omap_response(struct super_block *sb, u64 rid, u64 id,
|
||||
struct scoutfs_open_ino_map *map, int err);
|
||||
|
||||
u64 scoutfs_server_seq(struct super_block *sb);
|
||||
u64 scoutfs_server_next_seq(struct super_block *sb);
|
||||
void scoutfs_server_set_seq_if_greater(struct super_block *sb, u64 seq);
|
||||
|
||||
void scoutfs_server_start(struct super_block *sb, struct scoutfs_quorum_config *qconf, u64 term);
|
||||
void scoutfs_server_stop(struct super_block *sb);
|
||||
void scoutfs_server_stop_wait(struct super_block *sb);
|
||||
bool scoutfs_server_is_running(struct super_block *sb);
|
||||
bool scoutfs_server_is_up(struct super_block *sb);
|
||||
bool scoutfs_server_is_down(struct super_block *sb);
|
||||
|
||||
int scoutfs_server_setup(struct super_block *sb);
|
||||
void scoutfs_server_destroy(struct super_block *sb);
|
||||
|
||||
264 kmod/src/srch.c
@@ -18,6 +18,7 @@
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sort.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "format.h"
|
||||
@@ -28,7 +29,11 @@
|
||||
#include "btree.h"
|
||||
#include "spbm.h"
|
||||
#include "client.h"
|
||||
#include "counters.h"
|
||||
#include "scoutfs_trace.h"
|
||||
#include "triggers.h"
|
||||
#include "sysfs.h"
|
||||
#include "msg.h"
|
||||
|
||||
/*
|
||||
* This srch subsystem gives us a way to find inodes that have a given
|
||||
@@ -67,10 +72,14 @@ struct srch_info {
|
||||
atomic_t shutdown;
|
||||
struct workqueue_struct *workq;
|
||||
struct delayed_work compact_dwork;
|
||||
struct scoutfs_sysfs_attrs ssa;
|
||||
atomic_t compact_delay_ms;
|
||||
};
|
||||
|
||||
#define DECLARE_SRCH_INFO(sb, name) \
|
||||
struct srch_info *name = SCOUTFS_SB(sb)->srch_info
|
||||
#define DECLARE_SRCH_INFO_KOBJ(kobj, name) \
|
||||
DECLARE_SRCH_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
|
||||
|
||||
#define SRE_FMT "%016llx.%llu.%llu"
|
||||
#define SRE_ARG(sre) \
|
||||
@@ -519,6 +528,95 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Padded entries are encoded in pairs after an existing entry. All of
|
||||
* the pairs cancel each other out by all readers (the second encoding
|
||||
* looks like deletion) so they aren't visible to the first/last bounds of
|
||||
* the block or file.
|
||||
*/
|
||||
static int append_padded_entry(struct scoutfs_srch_file *sfl, u64 blk,
|
||||
struct scoutfs_srch_block *srb, struct scoutfs_srch_entry *sre)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = encode_entry(srb->entries + le32_to_cpu(srb->entry_bytes),
|
||||
sre, &srb->tail);
|
||||
if (ret > 0) {
|
||||
srb->tail = *sre;
|
||||
le32_add_cpu(&srb->entry_nr, 1);
|
||||
le32_add_cpu(&srb->entry_bytes, ret);
|
||||
le64_add_cpu(&sfl->entries, 1);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called by a testing trigger to create a very specific case of
|
||||
* encoded entry offsets. We want the last entry in the block to start
|
||||
* precisely at the _SAFE_BYTES offset.
|
||||
*
|
||||
* This is called when there is a single existing entry in the block.
|
||||
* We have the entire block to work with. We encode pairs of matching
|
||||
* entries. This hides them from readers (both searches and merging) as
|
||||
* they're interpreted as creation and deletion and are deleted. We use
|
||||
* the existing hash value of the first entry in the block but then set
|
||||
* the inode to an impossibly large number so it doesn't interfere with
|
||||
* anything.
|
||||
*
|
||||
* To hit the specific offset we very carefully manage the number of
* bytes of change between fields in the entry. We know that if we
* change all the bytes of the ino and id we end up with a 20 byte
|
||||
* (2+8+8,2) encoding of the pair of entries. To have the last entry
|
||||
* start at the _SAFE_POS offset we know that the final 20 byte pair
|
||||
* encoding needs to end at 2 bytes (second entry encoding) after the
|
||||
* _SAFE_POS offset.
|
||||
*
|
||||
* So as we encode pairs we watch the delta of our current offset from
|
||||
* that desired final offset of 2 past _SAFE_POS. If we're a multiple
|
||||
* of 20 away then we encode the full 20 byte pairs. If we're not, then
|
||||
* we drop a byte to encode 19 bytes. That'll slowly change the offset
|
||||
* to be a multiple of 20 again while encoding large entries.
|
||||
*/
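/*
 * Illustrative worked example of the arithmetic above (the starting gap
 * is made up): if entry_bytes is 59 bytes short of target, 59 is not a
 * multiple of 20, so the first pair is encoded in 19 bytes, leaving 40.
 * The next two pairs encode in 20 bytes each and land exactly on target,
 * so the final 2 byte entry starts at the _SAFE_BYTES offset as intended.
 */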
|
||||
static void pad_entries_at_safe(struct scoutfs_srch_file *sfl, u64 blk,
|
||||
struct scoutfs_srch_block *srb)
|
||||
{
|
||||
struct scoutfs_srch_entry sre;
|
||||
u32 target;
|
||||
s32 diff;
|
||||
u64 hash;
|
||||
u64 ino;
|
||||
u64 id;
|
||||
int ret;
|
||||
|
||||
hash = le64_to_cpu(srb->tail.hash);
|
||||
ino = le64_to_cpu(srb->tail.ino) | (1ULL << 62);
|
||||
id = le64_to_cpu(srb->tail.id);
|
||||
|
||||
target = SCOUTFS_SRCH_BLOCK_SAFE_BYTES + 2;
|
||||
|
||||
while ((diff = target - le32_to_cpu(srb->entry_bytes)) > 0) {
|
||||
ino ^= 1ULL << (7 * 8);
|
||||
if (diff % 20 == 0) {
|
||||
id ^= 1ULL << (7 * 8);
|
||||
} else {
|
||||
id ^= 1ULL << (6 * 8);
|
||||
}
|
||||
|
||||
sre.hash = cpu_to_le64(hash);
|
||||
sre.ino = cpu_to_le64(ino);
|
||||
sre.id = cpu_to_le64(id);
|
||||
|
||||
ret = append_padded_entry(sfl, blk, srb, &sre);
|
||||
if (ret == 0)
|
||||
ret = append_padded_entry(sfl, blk, srb, &sre);
|
||||
BUG_ON(ret != 0);
|
||||
|
||||
diff = target - le32_to_cpu(srb->entry_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller is dropping an ino/id because the tracking rbtree is full.
|
||||
* This loses information so we can't return any entries at or after the
|
||||
@@ -860,7 +958,6 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
struct scoutfs_srch_rb_root *sroot,
|
||||
u64 hash, u64 ino, u64 last_ino, bool *done)
|
||||
{
|
||||
struct scoutfs_net_roots prev_roots;
|
||||
struct scoutfs_net_roots roots;
|
||||
struct scoutfs_srch_entry start;
|
||||
struct scoutfs_srch_entry end;
|
||||
@@ -868,6 +965,7 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_srch_file sfl;
|
||||
SCOUTFS_BTREE_ITEM_REF(iref);
|
||||
DECLARE_SAVED_REFS(saved);
|
||||
struct scoutfs_key key;
|
||||
unsigned long limit = SRCH_LIMIT;
|
||||
int ret;
|
||||
@@ -876,7 +974,6 @@ int scoutfs_srch_search_xattrs(struct super_block *sb,
|
||||
|
||||
*done = false;
|
||||
srch_init_rb_root(sroot);
|
||||
memset(&prev_roots, 0, sizeof(prev_roots));
|
||||
|
||||
start.hash = cpu_to_le64(hash);
|
||||
start.ino = cpu_to_le64(ino);
|
||||
@@ -891,7 +988,6 @@ retry:
|
||||
ret = scoutfs_client_get_roots(sb, &roots);
|
||||
if (ret)
|
||||
goto out;
|
||||
memset(&roots.fs_root, 0, sizeof(roots.fs_root));
|
||||
|
||||
end = final;
|
||||
|
||||
@@ -967,16 +1063,10 @@ retry:
|
||||
*done = sre_cmp(&end, &final) == 0;
|
||||
ret = 0;
|
||||
out:
|
||||
if (ret == -ESTALE) {
|
||||
if (memcmp(&prev_roots, &roots, sizeof(roots)) == 0) {
|
||||
scoutfs_inc_counter(sb, srch_search_stale_eio);
|
||||
ret = -EIO;
|
||||
} else {
|
||||
scoutfs_inc_counter(sb, srch_search_stale_retry);
|
||||
prev_roots = roots;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
ret = scoutfs_block_check_stale(sb, ret, &saved, &roots.srch_root.ref,
|
||||
&roots.logs_root.ref);
|
||||
if (ret == -ESTALE)
|
||||
goto retry;
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -989,18 +1079,30 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_srch_file *sfl)
|
||||
struct scoutfs_srch_file *sfl, bool force)
|
||||
{
|
||||
struct scoutfs_key key;
|
||||
int ret;
|
||||
|
||||
if (le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT)
|
||||
if (sfl->ref.blkno && !force && scoutfs_trigger(sb, SRCH_FORCE_LOG_ROTATE))
|
||||
force = true;
|
||||
|
||||
if (sfl->ref.blkno == 0 ||
|
||||
(!force && le64_to_cpu(sfl->blocks) < SCOUTFS_SRCH_LOG_BLOCK_LIMIT))
|
||||
return 0;
|
||||
|
||||
init_srch_key(&key, SCOUTFS_SRCH_LOG_TYPE,
|
||||
le64_to_cpu(sfl->ref.blkno), 0);
|
||||
ret = scoutfs_btree_insert(sb, alloc, wri, root, &key,
|
||||
sfl, sizeof(*sfl));
|
||||
/*
|
||||
* While it's fine to replay moving the client's logging srch
|
||||
* file to the core btree item, server commits should keep it
|
||||
* from happening. So we'll warn if we see it happen. This can
|
||||
* be removed eventually.
|
||||
*/
|
||||
if (WARN_ON_ONCE(ret == -EEXIST))
|
||||
ret = 0;
|
||||
if (ret == 0) {
|
||||
memset(sfl, 0, sizeof(*sfl));
|
||||
scoutfs_inc_counter(sb, srch_rotate_log);
|
||||
@@ -1460,7 +1562,7 @@ static int kway_merge(struct super_block *sb,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_srch_file *sfl,
|
||||
kway_get_t kway_get, kway_advance_t kway_adv,
|
||||
void **args, int nr)
|
||||
void **args, int nr, bool logs_input)
|
||||
{
|
||||
DECLARE_SRCH_INFO(sb, srinf);
|
||||
struct scoutfs_srch_block *srb = NULL;
|
||||
@@ -1480,14 +1582,14 @@ static int kway_merge(struct super_block *sb,
|
||||
int ind;
|
||||
int i;
|
||||
|
||||
if (WARN_ON_ONCE(nr <= 1))
|
||||
if (WARN_ON_ONCE(nr <= 0))
|
||||
return -EINVAL;
|
||||
|
||||
nr_parents = roundup_pow_of_two(nr) - 1;
|
||||
/* always at least one parent for single leaf */
|
||||
nr_parents = max_t(unsigned long, 1, roundup_pow_of_two(nr) - 1);
|
||||
/* root at [1] for easy sib/parent index calc, final pad for odd sib */
|
||||
nr_nodes = 1 + nr_parents + nr + 1;
|
||||
tnodes = __vmalloc(nr_nodes * sizeof(struct tourn_node),
|
||||
GFP_NOFS, PAGE_KERNEL);
|
||||
tnodes = kc__vmalloc(nr_nodes * sizeof(struct tourn_node), GFP_NOFS);
|
||||
if (!tnodes)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1564,6 +1666,15 @@ static int kway_merge(struct super_block *sb,
|
||||
blk++;
|
||||
}
|
||||
|
||||
/* end sorted block on _SAFE offset for testing */
|
||||
if (bl && le32_to_cpu(srb->entry_nr) == 1 && logs_input &&
|
||||
scoutfs_trigger(sb, SRCH_COMPACT_LOGS_PAD_SAFE)) {
|
||||
pad_entries_at_safe(sfl, blk, srb);
|
||||
scoutfs_block_put(sb, bl);
|
||||
bl = NULL;
|
||||
blk++;
|
||||
}
|
||||
|
||||
scoutfs_inc_counter(sb, srch_compact_entry);
|
||||
|
||||
} else {
|
||||
@@ -1606,6 +1717,8 @@ static int kway_merge(struct super_block *sb,
|
||||
empty++;
|
||||
ret = 0;
|
||||
} else if (ret < 0) {
|
||||
if (ret == -ENOANO) /* just testing trigger */
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1744,7 +1857,7 @@ static int compact_logs(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
page->private = 0;
|
||||
list_add_tail(&page->list, &pages);
|
||||
list_add_tail(&page->lru, &pages);
|
||||
nr_pages++;
|
||||
scoutfs_inc_counter(sb, srch_compact_log_page);
|
||||
}
|
||||
@@ -1797,7 +1910,7 @@ static int compact_logs(struct super_block *sb,
|
||||
|
||||
/* sort page entries and reset private for _next */
|
||||
i = 0;
|
||||
list_for_each_entry(page, &pages, list) {
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
args[i++] = page;
|
||||
|
||||
if (atomic_read(&srinf->shutdown)) {
|
||||
@@ -1813,12 +1926,12 @@ static int compact_logs(struct super_block *sb,
|
||||
}
|
||||
|
||||
ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_page, kway_adv_page,
|
||||
args, nr_pages);
|
||||
args, nr_pages, true);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* make sure we finished all the pages */
|
||||
list_for_each_entry(page, &pages, list) {
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
sre = page_priv_sre(page);
|
||||
if (page->private < SRES_PER_PAGE && sre->ino != 0) {
|
||||
ret = -ENOSPC;
|
||||
@@ -1831,8 +1944,8 @@ static int compact_logs(struct super_block *sb,
|
||||
out:
|
||||
scoutfs_block_put(sb, bl);
|
||||
vfree(args);
|
||||
list_for_each_entry_safe(page, tmp, &pages, list) {
|
||||
list_del(&page->list);
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
@@ -1871,12 +1984,18 @@ static int kway_get_reader(struct super_block *sb,
|
||||
srb = rdr->bl->data;
|
||||
|
||||
if (rdr->pos > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip >= SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip > SCOUTFS_SRCH_BLOCK_SAFE_BYTES ||
|
||||
rdr->skip >= le32_to_cpu(srb->entry_bytes)) {
|
||||
/* XXX inconsistency */
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (rdr->decoded_bytes == 0 && rdr->pos == SCOUTFS_SRCH_BLOCK_SAFE_BYTES &&
|
||||
scoutfs_trigger(sb, SRCH_MERGE_STOP_SAFE)) {
|
||||
/* only used in testing */
|
||||
return -ENOANO;
|
||||
}
|
||||
|
||||
/* decode entry, possibly skipping start of the block */
|
||||
while (rdr->decoded_bytes == 0 || rdr->pos < rdr->skip) {
|
||||
ret = decode_entry(srb->entries + rdr->pos,
|
||||
@@ -1966,7 +2085,7 @@ static int compact_sorted(struct super_block *sb,
|
||||
}
|
||||
|
||||
ret = kway_merge(sb, alloc, wri, &sc->out, kway_get_reader,
|
||||
kway_adv_reader, args, nr);
|
||||
kway_adv_reader, args, nr, false);
|
||||
|
||||
sc->flags |= SCOUTFS_SRCH_COMPACT_FLAG_DONE;
|
||||
for (i = 0; i < nr; i++) {
|
||||
@@ -2080,7 +2199,7 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_srch_compact *sc)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sc->nr; i++) {
|
||||
@@ -2095,8 +2214,15 @@ static int delete_files(struct super_block *sb, struct scoutfs_alloc *alloc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* wait 10s between compact attempts on error, immediate after success */
|
||||
#define SRCH_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
static void queue_compact_work(struct srch_info *srinf, bool immediate)
|
||||
{
|
||||
unsigned long delay;
|
||||
|
||||
if (!atomic_read(&srinf->shutdown)) {
|
||||
delay = immediate ? 0 : msecs_to_jiffies(atomic_read(&srinf->compact_delay_ms));
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a compaction operation from the server, sort the entries from the
|
||||
@@ -2124,8 +2250,8 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
struct super_block *sb = srinf->sb;
|
||||
struct scoutfs_block_writer wri;
|
||||
struct scoutfs_alloc alloc;
|
||||
unsigned long delay;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
sc = kmalloc(sizeof(struct scoutfs_srch_compact), GFP_NOFS);
|
||||
if (sc == NULL) {
|
||||
@@ -2136,6 +2262,8 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
scoutfs_block_writer_init(sb, &wri);
|
||||
|
||||
ret = scoutfs_client_srch_get_compact(sb, sc);
|
||||
if (ret >= 0)
|
||||
trace_scoutfs_srch_compact_client_recv(sb, sc);
|
||||
if (ret < 0 || sc->nr == 0)
|
||||
goto out;
|
||||
|
||||
@@ -2156,27 +2284,75 @@ static void scoutfs_srch_compact_worker(struct work_struct *work)
|
||||
if (ret < 0)
|
||||
goto commit;
|
||||
|
||||
ret = scoutfs_block_writer_write(sb, &wri);
|
||||
ret = scoutfs_alloc_prepare_commit(sb, &alloc, &wri) ?:
|
||||
scoutfs_block_writer_write(sb, &wri);
|
||||
commit:
|
||||
/* the server won't use our partial compact if _ERROR is set */
|
||||
sc->meta_avail = alloc.avail;
|
||||
sc->meta_freed = alloc.freed;
|
||||
sc->flags |= ret < 0 ? SCOUTFS_SRCH_COMPACT_FLAG_ERROR : 0;
|
||||
|
||||
ret = scoutfs_client_srch_commit_compact(sb, sc);
|
||||
trace_scoutfs_srch_compact_client_send(sb, sc);
|
||||
err = scoutfs_client_srch_commit_compact(sb, sc);
|
||||
if (err < 0 && ret == 0)
|
||||
ret = err;
|
||||
out:
|
||||
/* our allocators and files should be stable */
|
||||
WARN_ON_ONCE(ret == -ESTALE);
|
||||
if (ret < 0)
|
||||
scoutfs_inc_counter(sb, srch_compact_error);
|
||||
|
||||
scoutfs_block_writer_forget_all(sb, &wri);
|
||||
if (!atomic_read(&srinf->shutdown)) {
|
||||
delay = ret == 0 ? 0 : msecs_to_jiffies(SRCH_COMPACT_DELAY_MS);
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork, delay);
|
||||
}
|
||||
queue_compact_work(srinf, sc->nr > 0 && ret == 0);
|
||||
|
||||
kfree(sc);
|
||||
}
|
||||
|
||||
static ssize_t compact_delay_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_SRCH_INFO_KOBJ(kobj, srinf);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u", atomic_read(&srinf->compact_delay_ms));
|
||||
}
|
||||
|
||||
#define MIN_COMPACT_DELAY_MS MSEC_PER_SEC
|
||||
#define DEF_COMPACT_DELAY_MS (10 * MSEC_PER_SEC)
|
||||
#define MAX_COMPACT_DELAY_MS (60 * MSEC_PER_SEC)
|
||||
|
||||
static ssize_t compact_delay_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
DECLARE_SRCH_INFO(sb, srinf);
|
||||
char nullterm[30]; /* more than enough for octal -U64_MAX */
|
||||
u64 val;
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = min(count, sizeof(nullterm) - 1);
|
||||
memcpy(nullterm, buf, len);
|
||||
nullterm[len] = '\0';
|
||||
|
||||
ret = kstrtoll(nullterm, 0, &val);
|
||||
if (ret < 0 || val < MIN_COMPACT_DELAY_MS || val > MAX_COMPACT_DELAY_MS) {
|
||||
scoutfs_err(sb, "invalid compact_delay_ms value, must be between %lu and %lu",
|
||||
MIN_COMPACT_DELAY_MS, MAX_COMPACT_DELAY_MS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_set(&srinf->compact_delay_ms, val);
|
||||
cancel_delayed_work(&srinf->compact_dwork);
|
||||
queue_compact_work(srinf, false);
|
||||
|
||||
return count;
|
||||
}
|
||||
SCOUTFS_ATTR_RW(compact_delay_ms);
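/*
 * Illustrative usage note (the exact sysfs path is an assumption): this
 * attribute lives in the "srch" sysfs directory created below, so the
 * compaction retry delay can be adjusted at runtime with something like:
 *   echo <ms> > /sys/fs/scoutfs/<mount id>/srch/compact_delay_ms
 */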
|
||||
|
||||
static struct attribute *srch_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(compact_delay_ms),
|
||||
NULL,
|
||||
};
|
||||
|
||||
void scoutfs_srch_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
@@ -2193,6 +2369,8 @@ void scoutfs_srch_destroy(struct super_block *sb)
|
||||
destroy_workqueue(srinf->workq);
|
||||
}
|
||||
|
||||
scoutfs_sysfs_destroy_attrs(sb, &srinf->ssa);
|
||||
|
||||
kfree(srinf);
|
||||
sbi->srch_info = NULL;
|
||||
}
|
||||
@@ -2210,8 +2388,15 @@ int scoutfs_srch_setup(struct super_block *sb)
|
||||
srinf->sb = sb;
|
||||
atomic_set(&srinf->shutdown, 0);
|
||||
INIT_DELAYED_WORK(&srinf->compact_dwork, scoutfs_srch_compact_worker);
|
||||
scoutfs_sysfs_init_attrs(sb, &srinf->ssa);
|
||||
atomic_set(&srinf->compact_delay_ms, DEF_COMPACT_DELAY_MS);
|
||||
|
||||
sbi->srch_info = srinf;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs(sb, &srinf->ssa, srch_attrs, "srch");
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
srinf->workq = alloc_workqueue("scoutfs_srch_compact",
|
||||
WQ_NON_REENTRANT | WQ_UNBOUND |
|
||||
WQ_HIGHPRI, 0);
|
||||
@@ -2220,8 +2405,7 @@ int scoutfs_srch_setup(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
queue_delayed_work(srinf->workq, &srinf->compact_dwork,
|
||||
msecs_to_jiffies(SRCH_COMPACT_DELAY_MS));
|
||||
queue_compact_work(srinf, false);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
|
||||
@@ -37,7 +37,7 @@ int scoutfs_srch_rotate_log(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
struct scoutfs_btree_root *root,
|
||||
struct scoutfs_srch_file *sfl);
|
||||
struct scoutfs_srch_file *sfl, bool force);
|
||||
int scoutfs_srch_get_compact(struct super_block *sb,
|
||||
struct scoutfs_alloc *alloc,
|
||||
struct scoutfs_block_writer *wri,
|
||||
|
||||
406 kmod/src/super.c
@@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/magic.h>
|
||||
@@ -20,7 +21,6 @@
|
||||
#include <linux/statfs.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/percpu.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "block.h"
|
||||
@@ -44,70 +44,45 @@
|
||||
#include "srch.h"
|
||||
#include "item.h"
|
||||
#include "alloc.h"
|
||||
#include "recov.h"
|
||||
#include "omap.h"
|
||||
#include "volopt.h"
|
||||
#include "fence.h"
|
||||
#include "xattr.h"
|
||||
#include "wkic.h"
|
||||
#include "quota.h"
|
||||
#include "scoutfs_trace.h"
|
||||
|
||||
static struct dentry *scoutfs_debugfs_root;
|
||||
|
||||
static DEFINE_PER_CPU(u64, clock_sync_ids) = 0;
|
||||
|
||||
/*
|
||||
* Give the caller a unique clock sync id for a message they're about to
|
||||
* send. We make the ids reasonably globally unique by using randomly
|
||||
* initialized per-cpu 64bit counters.
|
||||
*/
|
||||
__le64 scoutfs_clock_sync_id(void)
|
||||
/* the statfs file fields can be small (and signed?) :/ */
|
||||
static __statfs_word saturate_truncated_word(u64 files)
|
||||
{
|
||||
u64 rnd = 0;
|
||||
u64 ret;
|
||||
u64 *id;
|
||||
__statfs_word word = files;
|
||||
|
||||
retry:
|
||||
preempt_disable();
|
||||
id = this_cpu_ptr(&clock_sync_ids);
|
||||
if (*id == 0) {
|
||||
if (rnd == 0) {
|
||||
preempt_enable();
|
||||
get_random_bytes(&rnd, sizeof(rnd));
|
||||
goto retry;
|
||||
}
|
||||
*id = rnd;
|
||||
if (word != files) {
|
||||
word = ~0ULL;
|
||||
if (word < 0)
|
||||
word = (unsigned long)word >> 1;
|
||||
}
|
||||
|
||||
ret = ++(*id);
|
||||
preempt_enable();
|
||||
|
||||
return cpu_to_le64(ret);
|
||||
}
|
||||
|
||||
struct statfs_free_blocks {
|
||||
u64 meta;
|
||||
u64 data;
|
||||
};
|
||||
|
||||
static int count_free_blocks(struct super_block *sb, void *arg, int owner,
|
||||
u64 id, bool meta, bool avail, u64 blocks)
|
||||
{
|
||||
struct statfs_free_blocks *sfb = arg;
|
||||
|
||||
if (meta)
|
||||
sfb->meta += blocks;
|
||||
else
|
||||
sfb->data += blocks;
|
||||
|
||||
return 0;
|
||||
return word;
|
||||
}
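/*
 * Illustrative example of the saturation above: with a 32-bit unsigned
 * statfs word, a computed count of 2^40 no longer equals its truncated
 * copy, so the field is clamped to ~0 (U32_MAX).  The extra shift only
 * matters when the word type is signed, keeping the clamped value
 * positive.
 */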
|
||||
|
||||
/*
|
||||
* Build the free block counts by having alloc read all the persistent
|
||||
* blocks which contain allocators and calling us for each of them.
|
||||
* Only the super block reads aren't cached so repeatedly calling statfs
|
||||
* is like repeated O_DIRECT IO. We can add a cache and stale results
|
||||
* if that IO becomes a problem.
|
||||
* The server gives us the current sum of free blocks and the total
|
||||
* inode count that it can see across all the clients' log trees. It
|
||||
* won't see allocations and inode creations or deletions that are dirty
|
||||
* in client memory as it builds a transaction.
|
||||
*
|
||||
* We fake the number of free inodes value by assuming that we can fill
|
||||
* free blocks with a certain number of inodes. We then add the number of
|
||||
* current inodes to that free count to determine the total possible
|
||||
* inodes.
|
||||
* We don't have static limits on the number of files so the statfs
|
||||
* fields for the total possible files and the number free aren't
|
||||
* particularly helpful. What we do want to report is the number of
|
||||
* inodes, so we fake a max possible number of inodes given a
|
||||
* conservative estimate of the total space consumption per file and
|
||||
* then find the free by subtracting our precise count of active inodes.
|
||||
* This seems like the least surprising compromise where the file max
|
||||
* doesn't change and the caller gets the correct count of used inodes.
|
||||
*
|
||||
* The fsid that we report is constructed from the xor of the first two
|
||||
* and second two little endian u32s that make up the uuid bytes.
|
||||
@@ -115,41 +90,33 @@ static int count_free_blocks(struct super_block *sb, void *arg, int owner,
|
||||
static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
|
||||
{
|
||||
struct super_block *sb = dentry->d_inode->i_sb;
|
||||
struct scoutfs_super_block *super = NULL;
|
||||
struct statfs_free_blocks sfb = {0,};
|
||||
struct scoutfs_net_statfs nst;
|
||||
u64 files;
|
||||
u64 ffree;
|
||||
__le32 uuid[4];
|
||||
int ret;
|
||||
|
||||
scoutfs_inc_counter(sb, statfs);
|
||||
|
||||
super = kzalloc(sizeof(struct scoutfs_super_block), GFP_NOFS);
|
||||
if (!super) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_read_super(sb, super);
|
||||
ret = scoutfs_client_statfs(sb, &nst);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = scoutfs_alloc_foreach(sb, count_free_blocks, &sfb);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
kst->f_bfree = (sfb.meta << SCOUTFS_BLOCK_SM_LG_SHIFT) + sfb.data;
|
||||
kst->f_bfree = (le64_to_cpu(nst.free_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
|
||||
le64_to_cpu(nst.free_data_blocks);
|
||||
kst->f_type = SCOUTFS_SUPER_MAGIC;
|
||||
kst->f_bsize = SCOUTFS_BLOCK_SM_SIZE;
|
||||
kst->f_blocks = (le64_to_cpu(super->total_meta_blocks) <<
|
||||
SCOUTFS_BLOCK_SM_LG_SHIFT) +
|
||||
le64_to_cpu(super->total_data_blocks);
|
||||
kst->f_blocks = (le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_SM_LG_SHIFT) +
|
||||
le64_to_cpu(nst.total_data_blocks);
|
||||
kst->f_bavail = kst->f_bfree;
|
||||
|
||||
/* arbitrarily assume ~1K / empty file */
|
||||
kst->f_ffree = sfb.meta * (SCOUTFS_BLOCK_LG_SIZE / 1024);
|
||||
kst->f_files = kst->f_ffree + le64_to_cpu(super->next_ino);
|
||||
files = div_u64(le64_to_cpu(nst.total_meta_blocks) << SCOUTFS_BLOCK_LG_SHIFT, 2048);
|
||||
ffree = files - le64_to_cpu(nst.inode_count);
|
||||
kst->f_files = saturate_truncated_word(files);
|
||||
kst->f_ffree = saturate_truncated_word(ffree);
|
||||
|
||||
BUILD_BUG_ON(sizeof(uuid) != sizeof(super->uuid));
|
||||
memcpy(uuid, super->uuid, sizeof(uuid));
|
||||
BUILD_BUG_ON(sizeof(uuid) != sizeof(nst.uuid));
|
||||
memcpy(uuid, nst.uuid, sizeof(uuid));
|
||||
kst->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[1]);
|
||||
kst->f_fsid.val[1] = le32_to_cpu(uuid[2]) ^ le32_to_cpu(uuid[3]);
|
||||
kst->f_namelen = SCOUTFS_NAME_LEN;
|
||||
@@ -158,57 +125,17 @@ static int scoutfs_statfs(struct dentry *dentry, struct kstatfs *kst)
|
||||
/* the vfs fills f_flags */
|
||||
ret = 0;
|
||||
out:
|
||||
kfree(super);
|
||||
|
||||
/*
|
||||
* We don't take cluster locks in statfs which makes it a very
|
||||
* convenient place to trigger lock reclaim for debugging. We
|
||||
* try to free as many locks as possible.
|
||||
*/
|
||||
if (scoutfs_trigger(sb, STATFS_LOCK_PURGE))
|
||||
scoutfs_free_unused_locks(sb, -1UL);
|
||||
scoutfs_free_unused_locks(sb);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scoutfs_show_options(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
struct super_block *sb = root->d_sb;
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
if (opts->quorum_slot_nr >= 0)
|
||||
seq_printf(seq, ",quorum_slot_nr=%d", opts->quorum_slot_nr);
|
||||
seq_printf(seq, ",metadev_path=%s", opts->metadev_path);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t metadev_path_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s", opts->metadev_path);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(metadev_path);
|
||||
|
||||
static ssize_t quorum_server_nr_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = SCOUTFS_SYSFS_ATTRS_SB(kobj);
|
||||
struct mount_options *opts = &SCOUTFS_SB(sb)->opts;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", opts->quorum_slot_nr);
|
||||
}
|
||||
SCOUTFS_ATTR_RO(quorum_server_nr);
|
||||
|
||||
static struct attribute *mount_options_attrs[] = {
|
||||
SCOUTFS_ATTR_PTR(metadev_path),
|
||||
SCOUTFS_ATTR_PTR(quorum_server_nr),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int scoutfs_sync_fs(struct super_block *sb, int wait)
|
||||
{
|
||||
trace_scoutfs_sync_fs(sb, wait);
|
||||
@@ -226,7 +153,25 @@ static void scoutfs_metadev_close(struct super_block *sb)
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (sbi->meta_bdev) {
|
||||
/*
|
||||
* Some kernels have blkdev_reread_part which calls
|
||||
* fsync_bdev while holding the bd_mutex which inverts
|
||||
* the s_umount hold in deactivate_super and blkdev_put
|
||||
* from kill_sb->put_super.
|
||||
*/
|
||||
lockdep_off();
|
||||
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
bdev_fput(sbi->meta_bdev_file);
|
||||
#else
|
||||
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
|
||||
blkdev_put(sbi->meta_bdev, sb);
|
||||
#else
|
||||
blkdev_put(sbi->meta_bdev, SCOUTFS_META_BDEV_MODE);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
lockdep_on();
|
||||
sbi->meta_bdev = NULL;
|
||||
}
|
||||
}
|
||||
@@ -243,41 +188,69 @@ static void scoutfs_put_super(struct super_block *sb)
|
||||
|
||||
trace_scoutfs_put_super(sb);
|
||||
|
||||
sbi->shutdown = true;
|
||||
/*
|
||||
* Wait for invalidation and iput to finish with any lingering
|
||||
* inode references that escaped the evict_inodes in
|
||||
* generic_shutdown_super. SB_ACTIVE is clear so final iput
|
||||
* will always evict.
|
||||
*/
|
||||
scoutfs_lock_flush_invalidate(sb);
|
||||
scoutfs_inode_flush_iput(sb);
|
||||
WARN_ON_ONCE(!list_empty(&sb->s_inodes));
|
||||
|
||||
scoutfs_data_destroy(sb);
|
||||
scoutfs_forest_stop(sb);
|
||||
scoutfs_srch_destroy(sb);
|
||||
|
||||
scoutfs_unlock(sb, sbi->rid_lock, SCOUTFS_LOCK_WRITE);
|
||||
sbi->rid_lock = NULL;
|
||||
scoutfs_lock_shutdown(sb);
|
||||
|
||||
scoutfs_shutdown_trans(sb);
|
||||
scoutfs_volopt_destroy(sb);
|
||||
scoutfs_client_destroy(sb);
|
||||
scoutfs_quota_destroy(sb);
|
||||
scoutfs_inode_destroy(sb);
|
||||
scoutfs_wkic_destroy(sb);
|
||||
scoutfs_item_destroy(sb);
|
||||
scoutfs_forest_destroy(sb);
|
||||
scoutfs_data_destroy(sb);
|
||||
|
||||
scoutfs_quorum_destroy(sb);
|
||||
scoutfs_lock_shutdown(sb);
|
||||
scoutfs_server_destroy(sb);
|
||||
scoutfs_recov_destroy(sb);
|
||||
scoutfs_net_destroy(sb);
|
||||
scoutfs_lock_destroy(sb);
|
||||
scoutfs_omap_destroy(sb);
|
||||
|
||||
scoutfs_block_destroy(sb);
|
||||
scoutfs_destroy_triggers(sb);
|
||||
scoutfs_fence_destroy(sb);
|
||||
scoutfs_options_destroy(sb);
|
||||
scoutfs_sysfs_destroy_attrs(sb, &sbi->mopts_ssa);
|
||||
debugfs_remove(sbi->debug_root);
|
||||
scoutfs_destroy_counters(sb);
|
||||
scoutfs_destroy_sysfs(sb);
|
||||
scoutfs_metadev_close(sb);
|
||||
|
||||
kfree(sbi->opts.metadev_path);
|
||||
kfree(sbi);
|
||||
|
||||
sb->s_fs_info = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Record that we're performing a forced unmount. As put_super drives
|
||||
* destruction of the filesystem we won't issue more network or storage
|
||||
* operations because we assume that they'll hang. Pending operations
|
||||
* can return errors when it's possible to do so. We may be racing with
|
||||
* pending operations which can't be canceled.
|
||||
*/
|
||||
static void scoutfs_umount_begin(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
scoutfs_warn(sb, "forcing unmount, can return errors and lose unsynced data");
|
||||
sbi->forced_unmount = true;
|
||||
|
||||
scoutfs_client_net_shutdown(sb);
|
||||
}
|
||||
|
||||
static const struct super_operations scoutfs_super_ops = {
|
||||
.alloc_inode = scoutfs_alloc_inode,
|
||||
.drop_inode = scoutfs_drop_inode,
|
||||
@@ -285,8 +258,9 @@ static const struct super_operations scoutfs_super_ops = {
|
||||
.destroy_inode = scoutfs_destroy_inode,
|
||||
.sync_fs = scoutfs_sync_fs,
|
||||
.statfs = scoutfs_statfs,
|
||||
.show_options = scoutfs_show_options,
|
||||
.show_options = scoutfs_options_show,
|
||||
.put_super = scoutfs_put_super,
|
||||
.umount_begin = scoutfs_umount_begin,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -306,28 +280,16 @@ int scoutfs_write_super(struct super_block *sb,
|
||||
sizeof(struct scoutfs_super_block));
|
||||
}
|
||||
|
||||
static bool invalid_blkno_limits(struct super_block *sb, char *which,
|
||||
u64 start, __le64 first, __le64 last,
|
||||
struct block_device *bdev, int shift)
|
||||
static bool small_bdev(struct super_block *sb, char *which, u64 blocks,
|
||||
struct block_device *bdev, int shift)
|
||||
{
|
||||
u64 blkno;
|
||||
u64 size = (u64)i_size_read(bdev->bd_inode);
|
||||
u64 count = size >> shift;
|
||||
|
||||
if (le64_to_cpu(first) < start) {
|
||||
scoutfs_err(sb, "super block first %s blkno %llu is within first valid blkno %llu",
|
||||
which, le64_to_cpu(first), start);
|
||||
return true;
|
||||
}
|
||||
if (blocks > count) {
|
||||
scoutfs_err(sb, "super block records %llu %s blocks, but device %u:%u size %llu only allows %llu blocks",
|
||||
blocks, which, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev), size, count);
|
||||
|
||||
if (le64_to_cpu(first) > le64_to_cpu(last)) {
|
||||
scoutfs_err(sb, "super block first %s blkno %llu is greater than last %s blkno %llu",
|
||||
which, le64_to_cpu(first), which, le64_to_cpu(last));
|
||||
return true;
|
||||
}
|
||||
|
||||
blkno = (i_size_read(bdev->bd_inode) >> shift) - 1;
|
||||
if (le64_to_cpu(last) > blkno) {
|
||||
scoutfs_err(sb, "super block last %s blkno %llu is beyond device size last blkno %llu",
|
||||
which, le64_to_cpu(last), blkno);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -376,27 +338,32 @@ static int scoutfs_read_super_from_bdev(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (le64_to_cpu(super->fmt_vers) < SCOUTFS_FORMAT_VERSION_MIN ||
|
||||
le64_to_cpu(super->fmt_vers) > SCOUTFS_FORMAT_VERSION_MAX) {
|
||||
scoutfs_err(sb, "super block has format version %llu outside of supported version range %u-%u",
|
||||
le64_to_cpu(super->fmt_vers), SCOUTFS_FORMAT_VERSION_MIN,
|
||||
SCOUTFS_FORMAT_VERSION_MAX);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (super->version != cpu_to_le64(SCOUTFS_INTEROP_VERSION)) {
|
||||
scoutfs_err(sb, "super block has invalid version %llu, expected %llu",
|
||||
le64_to_cpu(super->version),
|
||||
SCOUTFS_INTEROP_VERSION);
|
||||
/*
|
||||
* fill_supers checks the fmt_vers in both supers and then decides to use it.
|
||||
* From then on we verify that the supers we read have that version.
|
||||
*/
|
||||
if (sbi->fmt_vers != 0 && le64_to_cpu(super->fmt_vers) != sbi->fmt_vers) {
|
||||
scoutfs_err(sb, "super block has format version %llu different than %llu read at mount",
|
||||
le64_to_cpu(super->fmt_vers), sbi->fmt_vers);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* XXX do we want more rigorous invalid super checking? */
|
||||
|
||||
if (invalid_blkno_limits(sb, "meta",
|
||||
SCOUTFS_META_DEV_START_BLKNO,
|
||||
super->first_meta_blkno,
|
||||
super->last_meta_blkno, sbi->meta_bdev,
|
||||
SCOUTFS_BLOCK_LG_SHIFT) ||
|
||||
invalid_blkno_limits(sb, "data",
|
||||
SCOUTFS_DATA_DEV_START_BLKNO,
|
||||
super->first_data_blkno,
|
||||
super->last_data_blkno, sb->s_bdev,
|
||||
SCOUTFS_BLOCK_SM_SHIFT)) {
|
||||
if (small_bdev(sb, "metadata", le64_to_cpu(super->total_meta_blocks), sbi->meta_bdev,
|
||||
SCOUTFS_BLOCK_LG_SHIFT) ||
|
||||
small_bdev(sb, "data", le64_to_cpu(super->total_data_blocks), sb->s_bdev,
|
||||
SCOUTFS_BLOCK_SM_SHIFT)) {
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
@@ -503,7 +470,14 @@ static int scoutfs_read_supers(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
sbi->super = *meta_super;
|
||||
if (le64_to_cpu(meta_super->fmt_vers) != le64_to_cpu(data_super->fmt_vers)) {
|
||||
scoutfs_err(sb, "meta device format version %llu != data device format version %llu",
|
||||
le64_to_cpu(meta_super->fmt_vers), le64_to_cpu(data_super->fmt_vers));
|
||||
goto out;
|
||||
}
|
||||
|
||||
sbi->fsid = le64_to_cpu(meta_super->hdr.fsid);
|
||||
sbi->fmt_vers = le64_to_cpu(meta_super->fmt_vers);
|
||||
out:
|
||||
kfree(meta_super);
|
||||
kfree(data_super);
|
||||
@@ -512,9 +486,13 @@ out:
|
||||
|
||||
static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi;
|
||||
struct mount_options opts;
|
||||
struct scoutfs_mount_options opts;
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
struct file *meta_bdev_file;
|
||||
#else
|
||||
struct block_device *meta_bdev;
|
||||
#endif
|
||||
struct scoutfs_sb_info *sbi;
|
||||
struct inode *inode;
|
||||
int ret;
|
||||
|
||||
@@ -523,7 +501,11 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
sb->s_magic = SCOUTFS_SUPER_MAGIC;
|
||||
sb->s_maxbytes = MAX_LFS_FILESIZE;
|
||||
sb->s_op = &scoutfs_super_ops;
|
||||
sb->s_d_op = &scoutfs_dentry_ops;
|
||||
sb->s_export_op = &scoutfs_export_ops;
|
||||
sb->s_xattr = scoutfs_xattr_handlers;
|
||||
sb->s_flags |= SB_I_VERSION | SB_POSIXACL;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
/* btree blocks use long lived bh->b_data refs */
|
||||
mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);
|
||||
@@ -536,22 +518,17 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
|
||||
ret = assign_random_id(sbi);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
spin_lock_init(&sbi->next_ino_lock);
|
||||
init_waitqueue_head(&sbi->trans_hold_wq);
|
||||
spin_lock_init(&sbi->data_wait_root.lock);
|
||||
sbi->data_wait_root.root = RB_ROOT;
|
||||
spin_lock_init(&sbi->trans_write_lock);
|
||||
INIT_DELAYED_WORK(&sbi->trans_write_work, scoutfs_trans_write_func);
|
||||
init_waitqueue_head(&sbi->trans_write_wq);
|
||||
scoutfs_sysfs_init_attrs(sb, &sbi->mopts_ssa);
|
||||
|
||||
ret = scoutfs_parse_options(sb, data, &opts);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sbi->opts = opts;
|
||||
spin_lock_init(&sbi->next_ino_lock);
|
||||
spin_lock_init(&sbi->data_wait_root.lock);
|
||||
sbi->data_wait_root.root = RB_ROOT;
|
||||
|
||||
/* parse options early for use during setup */
|
||||
ret = scoutfs_options_early_setup(sb, data);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
scoutfs_options_read(sb, &opts);
|
||||
|
||||
ret = sb_set_blocksize(sb, SCOUTFS_BLOCK_SM_SIZE);
|
||||
if (ret != SCOUTFS_BLOCK_SM_SIZE) {
|
||||
@@ -560,9 +537,27 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
}
|
||||
|
||||
meta_bdev =
|
||||
blkdev_get_by_path(sbi->opts.metadev_path,
|
||||
SCOUTFS_META_BDEV_MODE, sb);
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
/*
|
||||
* pass sbi as holder, since dev_mount already passes sb, which triggers a
|
||||
* WARN_ON because dev_mount also passes non-NULL hops. By passing sbi
|
||||
* here we just get a simple error in our test cases.
|
||||
*/
|
||||
meta_bdev_file = bdev_file_open_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sbi, NULL);
|
||||
if (IS_ERR(meta_bdev_file)) {
|
||||
scoutfs_err(sb, "could not open metadev: error %ld",
|
||||
PTR_ERR(meta_bdev_file));
|
||||
ret = PTR_ERR(meta_bdev_file);
|
||||
goto out;
|
||||
}
|
||||
sbi->meta_bdev_file = meta_bdev_file;
|
||||
sbi->meta_bdev = file_bdev(meta_bdev_file);
|
||||
#else
|
||||
#ifdef KC_BLKDEV_PUT_HOLDER_ARG
|
||||
meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb, NULL);
|
||||
#else
|
||||
meta_bdev = blkdev_get_by_path(opts.metadev_path, SCOUTFS_META_BDEV_MODE, sb);
|
||||
#endif
|
||||
if (IS_ERR(meta_bdev)) {
|
||||
scoutfs_err(sb, "could not open metadev: error %ld",
|
||||
PTR_ERR(meta_bdev));
|
||||
@@ -570,6 +565,8 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
}
|
||||
sbi->meta_bdev = meta_bdev;
|
||||
#endif
|
||||
|
||||
ret = set_blocksize(sbi->meta_bdev, SCOUTFS_BLOCK_SM_SIZE);
|
||||
if (ret != 0) {
|
||||
scoutfs_err(sb, "failed to set metadev blocksize, returned %d",
|
||||
@@ -582,30 +579,34 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
scoutfs_setup_sysfs(sb) ?:
|
||||
scoutfs_setup_counters(sb) ?:
|
||||
scoutfs_options_setup(sb) ?:
|
||||
scoutfs_sysfs_create_attrs(sb, &sbi->mopts_ssa,
|
||||
mount_options_attrs, "mount_options") ?:
|
||||
scoutfs_setup_triggers(sb) ?:
|
||||
scoutfs_fence_setup(sb) ?:
|
||||
scoutfs_block_setup(sb) ?:
|
||||
scoutfs_forest_setup(sb) ?:
|
||||
scoutfs_item_setup(sb) ?:
|
||||
scoutfs_wkic_setup(sb) ?:
|
||||
scoutfs_inode_setup(sb) ?:
|
||||
scoutfs_quota_setup(sb) ?:
|
||||
scoutfs_data_setup(sb) ?:
|
||||
scoutfs_setup_trans(sb) ?:
|
||||
scoutfs_omap_setup(sb) ?:
|
||||
scoutfs_lock_setup(sb) ?:
|
||||
scoutfs_net_setup(sb) ?:
|
||||
scoutfs_recov_setup(sb) ?:
|
||||
scoutfs_server_setup(sb) ?:
|
||||
scoutfs_quorum_setup(sb) ?:
|
||||
scoutfs_client_setup(sb) ?:
|
||||
scoutfs_lock_rid(sb, SCOUTFS_LOCK_WRITE, 0, sbi->rid,
|
||||
&sbi->rid_lock) ?:
|
||||
scoutfs_trans_get_log_trees(sb) ?:
|
||||
scoutfs_volopt_setup(sb) ?:
|
||||
scoutfs_srch_setup(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO);
|
||||
/* this interruptible iget lets hung mount be aborted with ctl-c */
|
||||
inode = scoutfs_iget(sb, SCOUTFS_ROOT_INO, SCOUTFS_LKF_INTERRUPTIBLE, 0);
|
||||
if (IS_ERR(inode)) {
|
||||
ret = PTR_ERR(inode);
|
||||
if (ret == -ERESTARTSYS)
|
||||
ret = -EINTR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -615,12 +616,15 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = scoutfs_client_advance_seq(sb, &sbi->trans_seq);
|
||||
/* send requests once iget progress shows we had a server */
|
||||
ret = scoutfs_trans_get_log_trees(sb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* start up background services that use everything else */
|
||||
scoutfs_inode_start(sb);
|
||||
scoutfs_forest_start(sb);
|
||||
scoutfs_trans_restart_sync_deadline(sb);
|
||||
// scoutfs_scan_orphans(sb);
|
||||
ret = 0;
|
||||
out:
|
||||
/* on error, generic_shutdown_super calls put_super if s_root */
|
||||
@@ -641,7 +645,18 @@ static struct dentry *scoutfs_mount(struct file_system_type *fs_type, int flags,
|
||||
*/
|
||||
static void scoutfs_kill_sb(struct super_block *sb)
|
||||
{
|
||||
trace_scoutfs_kill_sb(sb);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (sbi) {
|
||||
sbi->unmounting = true;
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
if (SCOUTFS_HAS_SBI(sb)) {
|
||||
scoutfs_options_stop(sb);
|
||||
scoutfs_inode_orphan_stop(sb);
|
||||
scoutfs_lock_unmount_begin(sb);
|
||||
}
|
||||
|
||||
kill_block_super(sb);
|
||||
}
|
||||
@@ -659,7 +674,6 @@ MODULE_ALIAS_FS("scoutfs");
|
||||
static void teardown_module(void)
|
||||
{
|
||||
debugfs_remove(scoutfs_debugfs_root);
|
||||
scoutfs_dir_exit();
|
||||
scoutfs_inode_exit();
|
||||
scoutfs_sysfs_exit();
|
||||
}
|
||||
@@ -674,11 +688,15 @@ static int __init scoutfs_module_init(void)
|
||||
*/
|
||||
__asm__ __volatile__ (
|
||||
".section .note.git_describe,\"a\"\n"
|
||||
".string \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
|
||||
".ascii \""SCOUTFS_GIT_DESCRIBE"\\n\"\n"
|
||||
".previous\n");
|
||||
__asm__ __volatile__ (
|
||||
".section .note.scoutfs_interop_version,\"a\"\n"
|
||||
".string \""SCOUTFS_INTEROP_VERSION_STR"\\n\"\n"
|
||||
".section .note.scoutfs_format_version_min,\"a\"\n"
|
||||
".ascii \""SCOUTFS_FORMAT_VERSION_MIN_STR"\\n\"\n"
|
||||
".previous\n");
|
||||
__asm__ __volatile__ (
|
||||
".section .note.scoutfs_format_version_max,\"a\"\n"
|
||||
".ascii \""SCOUTFS_FORMAT_VERSION_MAX_STR"\\n\"\n"
|
||||
".previous\n");
|
||||
|
||||
scoutfs_init_counters();
|
||||
@@ -693,23 +711,23 @@ static int __init scoutfs_module_init(void)
|
||||
goto out;
|
||||
}
|
||||
ret = scoutfs_inode_init() ?:
|
||||
scoutfs_dir_init() ?:
|
||||
register_filesystem(&scoutfs_fs_type);
|
||||
out:
|
||||
if (ret)
|
||||
teardown_module();
|
||||
return ret;
|
||||
}
|
||||
module_init(scoutfs_module_init)
|
||||
module_init(scoutfs_module_init);
|
||||
|
||||
static void __exit scoutfs_module_exit(void)
|
||||
{
|
||||
unregister_filesystem(&scoutfs_fs_type);
|
||||
teardown_module();
|
||||
}
|
||||
module_exit(scoutfs_module_exit)
|
||||
module_exit(scoutfs_module_exit);
|
||||
|
||||
MODULE_AUTHOR("Zach Brown <zab@versity.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_INFO(git_describe, SCOUTFS_GIT_DESCRIBE);
|
||||
MODULE_INFO(scoutfs_interop_version, SCOUTFS_INTEROP_VERSION_STR);
|
||||
MODULE_INFO(scoutfs_format_version_min, SCOUTFS_FORMAT_VERSION_MIN_STR);
|
||||
MODULE_INFO(scoutfs_format_version_max, SCOUTFS_FORMAT_VERSION_MAX_STR);
|
||||
|
||||
@@ -26,20 +26,29 @@ struct net_info;
|
||||
struct block_info;
|
||||
struct forest_info;
|
||||
struct srch_info;
|
||||
struct recov_info;
|
||||
struct omap_info;
|
||||
struct volopt_info;
|
||||
struct fence_info;
|
||||
struct wkic_info;
|
||||
struct squota_info;
|
||||
|
||||
struct scoutfs_sb_info {
|
||||
struct super_block *sb;
|
||||
|
||||
/* assigned once at the start of each mount, read-only */
|
||||
u64 fsid;
|
||||
u64 rid;
|
||||
struct scoutfs_lock *rid_lock;
|
||||
|
||||
struct scoutfs_super_block super;
|
||||
u64 fmt_vers;
|
||||
|
||||
struct block_device *meta_bdev;
|
||||
#ifdef KC_BDEV_FILE_OPEN_BY_PATH
|
||||
struct file *meta_bdev_file;
|
||||
#endif
|
||||
|
||||
spinlock_t next_ino_lock;
|
||||
|
||||
struct options_info *options_info;
|
||||
struct data_info *data_info;
|
||||
struct inode_sb_info *inode_sb_info;
|
||||
struct btree_info *btree_info;
|
||||
@@ -48,40 +57,34 @@ struct scoutfs_sb_info {
|
||||
struct block_info *block_info;
|
||||
struct forest_info *forest_info;
|
||||
struct srch_info *srch_info;
|
||||
struct omap_info *omap_info;
|
||||
struct volopt_info *volopt_info;
|
||||
struct item_cache_info *item_cache_info;
|
||||
|
||||
wait_queue_head_t trans_hold_wq;
|
||||
struct task_struct *trans_task;
|
||||
struct wkic_info *wkic_info;
|
||||
struct squota_info *squota_info;
|
||||
struct fence_info *fence_info;
|
||||
|
||||
/* tracks tasks waiting for data extents */
|
||||
struct scoutfs_data_wait_root data_wait_root;
|
||||
|
||||
spinlock_t trans_write_lock;
|
||||
u64 trans_write_count;
|
||||
/* set as transaction opens with trans holders excluded */
|
||||
u64 trans_seq;
|
||||
int trans_write_ret;
|
||||
struct delayed_work trans_write_work;
|
||||
wait_queue_head_t trans_write_wq;
|
||||
struct workqueue_struct *trans_write_workq;
|
||||
bool trans_deadline_expired;
|
||||
|
||||
struct trans_info *trans_info;
|
||||
struct lock_info *lock_info;
|
||||
struct lock_server_info *lock_server_info;
|
||||
struct client_info *client_info;
|
||||
struct server_info *server_info;
|
||||
struct recov_info *recov_info;
|
||||
struct sysfs_info *sfsinfo;
|
||||
|
||||
struct scoutfs_counters *counters;
|
||||
struct scoutfs_triggers *triggers;
|
||||
|
||||
struct mount_options opts;
|
||||
struct options_sb_info *options;
|
||||
struct scoutfs_sysfs_attrs mopts_ssa;
|
||||
|
||||
struct dentry *debug_root;
|
||||
|
||||
bool shutdown;
|
||||
bool forced_unmount;
|
||||
bool unmounting;
|
||||
|
||||
unsigned long corruption_messages_once[SC_NR_LONGS];
|
||||
};
|
||||
@@ -101,7 +104,31 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)
|
||||
return !!(le64_to_cpu(super_block->flags) & SCOUTFS_FLAG_IS_META_BDEV);
|
||||
}
|
||||
|
||||
#ifdef KC_HAVE_BLK_MODE_T
|
||||
#define SCOUTFS_META_BDEV_MODE (BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL)
|
||||
#else
|
||||
#define SCOUTFS_META_BDEV_MODE (FMODE_READ | FMODE_WRITE | FMODE_EXCL)
|
||||
#endif
|
||||
|
||||
static inline bool scoutfs_forcing_unmount(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
return sbi->forced_unmount;
|
||||
}
|
||||
|
||||
/*
|
||||
* True if we're unmounting and shutting down. This can be used as a
* coarse indicator that we can avoid doing some work that no longer
* makes sense.
|
||||
*/
|
||||
static inline bool scoutfs_unmounting(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
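/* the read barrier pairs with the smp_wmb() after scoutfs_kill_sb() sets sbi->unmounting */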
smp_rmb();
|
||||
return !sbi || sbi->unmounting;
|
||||
}
|
||||
|
||||
/*
|
||||
* A small string embedded in messages that's used to identify a
|
||||
@@ -118,14 +145,14 @@ static inline bool SCOUTFS_IS_META_BDEV(struct scoutfs_super_block *super_block)
|
||||
(int)(le64_to_cpu(fsid) >> SCSB_SHIFT), \
|
||||
(int)(le64_to_cpu(rid) >> SCSB_SHIFT)
|
||||
#define SCSB_ARGS(sb) \
|
||||
(int)(le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) >> SCSB_SHIFT), \
|
||||
(int)(SCOUTFS_SB(sb)->fsid >> SCSB_SHIFT), \
|
||||
(int)(SCOUTFS_SB(sb)->rid >> SCSB_SHIFT)
|
||||
#define SCSB_TRACE_FIELDS \
|
||||
__field(__u64, fsid) \
|
||||
__field(__u64, rid)
|
||||
#define SCSB_TRACE_ASSIGN(sb) \
|
||||
__entry->fsid = SCOUTFS_HAS_SBI(sb) ? \
|
||||
le64_to_cpu(SCOUTFS_SB(sb)->super.hdr.fsid) : 0;\
|
||||
SCOUTFS_SB(sb)->fsid : 0; \
|
||||
__entry->rid = SCOUTFS_HAS_SBI(sb) ? \
|
||||
SCOUTFS_SB(sb)->rid : 0;
|
||||
#define SCSB_TRACE_ARGS \
|
||||
@@ -140,6 +167,17 @@ int scoutfs_write_super(struct super_block *sb,
|
||||
/* to keep this out of the ioctl.h public interface definition */
|
||||
long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
|
||||
__le64 scoutfs_clock_sync_id(void);
|
||||
/*
|
||||
* Returns 0 when supported, non-zero -errno when unsupported.
|
||||
*/
|
||||
static inline int scoutfs_fmt_vers_unsupported(struct super_block *sb, u64 vers)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
if (sbi && (sbi->fmt_vers < vers))
|
||||
return -EOPNOTSUPP;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
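As a worked illustration of the helper above (not part of the change itself), a caller gating new on-disk behavior on the mounted format version might look roughly like this; the SCOUTFS_FORMAT_VERSION_QUOTA constant and enable_quota() helper are hypothetical names used only for the sketch:

static int enable_quota_if_supported(struct super_block *sb)
{
	int ret;

	/* returns -EOPNOTSUPP when the mounted format version is older */
	ret = scoutfs_fmt_vers_unsupported(sb, SCOUTFS_FORMAT_VERSION_QUOTA);
	if (ret)
		return ret;

	return enable_quota(sb);
}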
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkdev.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "sysfs.h"
|
||||
@@ -37,14 +38,32 @@ struct attr_funcs {
|
||||
#define ATTR_FUNCS_RO(_name) \
|
||||
static struct attr_funcs _name##_attr_funcs = __ATTR_RO(_name)
|
||||
|
||||
static ssize_t data_device_maj_min_show(struct kobject *kobj, struct attribute *attr, char *buf)
|
||||
{
|
||||
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u:%u\n",
|
||||
MAJOR(sb->s_bdev->bd_dev), MINOR(sb->s_bdev->bd_dev));
|
||||
}
|
||||
ATTR_FUNCS_RO(data_device_maj_min);
|
||||
|
||||
static ssize_t format_version_show(struct kobject *kobj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", sbi->fmt_vers);
|
||||
}
|
||||
ATTR_FUNCS_RO(format_version);
|
||||
|
||||
static ssize_t fsid_show(struct kobject *kobj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct super_block *sb = KOBJ_TO_SB(kobj, sb_id_kobj);
|
||||
struct scoutfs_super_block *super = &SCOUTFS_SB(sb)->super;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx\n",
|
||||
le64_to_cpu(super->hdr.fsid));
|
||||
return snprintf(buf, PAGE_SIZE, "%016llx\n", sbi->fsid);
|
||||
}
|
||||
ATTR_FUNCS_RO(fsid);
|
||||
|
||||
@@ -91,6 +110,8 @@ static ssize_t attr_funcs_show(struct kobject *kobj, struct attribute *attr,
|
||||
|
||||
|
||||
static struct attribute *sb_id_attrs[] = {
|
||||
&data_device_maj_min_attr_funcs.attr,
|
||||
&format_version_attr_funcs.attr,
|
||||
&fsid_attr_funcs.attr,
|
||||
&rid_attr_funcs.attr,
|
||||
NULL,
|
||||
@@ -131,9 +152,10 @@ void scoutfs_sysfs_init_attrs(struct super_block *sb,
|
||||
* If this returns success then the file will be visible and show can
|
||||
* be called until unmount.
|
||||
*/
|
||||
int scoutfs_sysfs_create_attrs(struct super_block *sb,
|
||||
struct scoutfs_sysfs_attrs *ssa,
|
||||
struct attribute **attrs, char *fmt, ...)
|
||||
int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
|
||||
struct kobject *parent,
|
||||
struct scoutfs_sysfs_attrs *ssa,
|
||||
struct attribute **attrs, char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
size_t name_len;
|
||||
@@ -174,8 +196,8 @@ int scoutfs_sysfs_create_attrs(struct super_block *sb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype,
|
||||
scoutfs_sysfs_sb_dir(sb), "%s", ssa->name);
|
||||
ret = kobject_init_and_add(&ssa->kobj, &ssa->ktype, parent,
|
||||
"%s", ssa->name);
|
||||
out:
|
||||
if (ret) {
|
||||
kfree(ssa->name);
|
||||
@@ -246,7 +268,7 @@ int __init scoutfs_sysfs_init(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __exit scoutfs_sysfs_exit(void)
|
||||
void scoutfs_sysfs_exit(void)
|
||||
{
|
||||
if (scoutfs_kset)
|
||||
kset_unregister(scoutfs_kset);
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
|
||||
#define SCOUTFS_ATTR_RO(_name) \
|
||||
static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RO(_name)
|
||||
#define SCOUTFS_ATTR_RW(_name) \
|
||||
static struct kobj_attribute scoutfs_attr_##_name = __ATTR_RW(_name)
|
||||
|
||||
#define SCOUTFS_ATTR_PTR(_name) \
|
||||
&scoutfs_attr_##_name.attr
|
||||
@@ -34,9 +36,14 @@ struct scoutfs_sysfs_attrs {
|
||||
|
||||
void scoutfs_sysfs_init_attrs(struct super_block *sb,
|
||||
struct scoutfs_sysfs_attrs *ssa);
|
||||
int scoutfs_sysfs_create_attrs(struct super_block *sb,
|
||||
struct scoutfs_sysfs_attrs *ssa,
|
||||
struct attribute **attrs, char *fmt, ...);
|
||||
int scoutfs_sysfs_create_attrs_parent(struct super_block *sb,
|
||||
struct kobject *parent,
|
||||
struct scoutfs_sysfs_attrs *ssa,
|
||||
struct attribute **attrs, char *fmt, ...);
|
||||
#define scoutfs_sysfs_create_attrs(sb, ssa, attrs, fmt, args...) \
|
||||
scoutfs_sysfs_create_attrs_parent(sb, scoutfs_sysfs_sb_dir(sb), \
|
||||
ssa, attrs, fmt, ##args)
|
||||
|
||||
void scoutfs_sysfs_destroy_attrs(struct super_block *sb,
|
||||
struct scoutfs_sysfs_attrs *ssa);
|
||||
|
||||
@@ -46,6 +53,6 @@ int scoutfs_setup_sysfs(struct super_block *sb);
|
||||
void scoutfs_destroy_sysfs(struct super_block *sb);
|
||||
|
||||
int __init scoutfs_sysfs_init(void);
|
||||
void __exit scoutfs_sysfs_exit(void);
|
||||
void scoutfs_sysfs_exit(void);
|
||||
|
||||
#endif
|
||||
|
||||
90
kmod/src/totl.c
Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Copyright (C) 2023 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "format.h"
|
||||
#include "forest.h"
|
||||
#include "totl.h"
|
||||
|
||||
void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end)
|
||||
{
|
||||
scoutfs_key_set_zeros(start);
|
||||
start->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
scoutfs_key_set_ones(end);
|
||||
end->sk_zone = SCOUTFS_XATTR_TOTL_ZONE;
|
||||
}
|
||||
|
||||
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg)
|
||||
{
|
||||
memset(merg, 0, sizeof(struct scoutfs_totl_merging));
|
||||
}
|
||||
|
||||
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
|
||||
u64 seq, u8 flags, void *val, int val_len, int fic)
|
||||
{
|
||||
struct scoutfs_xattr_totl_val *tval = val;
|
||||
|
||||
if (fic & FIC_FS_ROOT) {
|
||||
merg->fs_seq = seq;
|
||||
merg->fs_total = le64_to_cpu(tval->total);
|
||||
merg->fs_count = le64_to_cpu(tval->count);
|
||||
} else if (fic & FIC_FINALIZED) {
|
||||
merg->fin_seq = seq;
|
||||
merg->fin_total += le64_to_cpu(tval->total);
|
||||
merg->fin_count += le64_to_cpu(tval->count);
|
||||
} else {
|
||||
merg->log_seq = seq;
|
||||
merg->log_total += le64_to_cpu(tval->total);
|
||||
merg->log_count += le64_to_cpu(tval->count);
|
||||
}
|
||||
}
|
||||
|
||||
/*
* .totl. item merging has to be careful because the log btree merging
* code can write partial results to the fs_root. This means that a
* reader can see both cases where new finalized logs should be applied
* to the old fs items and where old finalized logs have already been
* applied to the partially merged fs items. Currently active logged
* items are always applied on top of all cases.
*
* These cases are differentiated with a combination of sequence numbers
* in items, the count of contributing xattrs, and a flag
* differentiating finalized and active logged items. This lets us
* recognize all cases, including when finalized logs were merged and
* deleted the fs item.
*/
|
||||
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count)
|
||||
{
|
||||
*total = 0;
|
||||
*count = 0;
|
||||
|
||||
/* start with the fs item if we have it */
|
||||
if (merg->fs_seq != 0) {
|
||||
*total = merg->fs_total;
|
||||
*count = merg->fs_count;
|
||||
}
|
||||
|
||||
/* apply finalized logs if they're newer or creating */
|
||||
if (((merg->fs_seq != 0) && (merg->fin_seq > merg->fs_seq)) ||
|
||||
((merg->fs_seq == 0) && (merg->fin_count > 0))) {
|
||||
*total += merg->fin_total;
|
||||
*count += merg->fin_count;
|
||||
}
|
||||
|
||||
/* always apply active logs which must be newer than fs and finalized */
|
||||
if (merg->log_seq > 0) {
|
||||
*total += merg->log_total;
|
||||
*count += merg->log_count;
|
||||
}
|
||||
}
|
||||
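To make the flow above concrete, here is a rough usage sketch that is not part of the patch: it resolves one .totl. value from an fs_root item plus an active logged delta. The FIC_FS_ROOT flag and the total/count layout of struct scoutfs_xattr_totl_val are assumed from forest.h and format.h, and the sequence numbers and values are purely illustrative.

static void example_totl_resolve(void)
{
	struct scoutfs_xattr_totl_val fs = {
		.total = cpu_to_le64(10), .count = cpu_to_le64(2),
	};
	struct scoutfs_xattr_totl_val log = {
		.total = cpu_to_le64(5), .count = cpu_to_le64(1),
	};
	struct scoutfs_totl_merging merg;
	__u64 total;
	__u64 count;

	scoutfs_totl_merge_init(&merg);
	/* fs_root copy written by an earlier merge */
	scoutfs_totl_merge_contribute(&merg, 100, 0, &fs, sizeof(fs), FIC_FS_ROOT);
	/* currently active logged delta, newer than the fs item */
	scoutfs_totl_merge_contribute(&merg, 200, 0, &log, sizeof(log), 0);
	scoutfs_totl_merge_resolve(&merg, &total, &count);
	/* total == 15 and count == 3 in this example */
}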
24
kmod/src/totl.h
Normal file
@@ -0,0 +1,24 @@
|
||||
#ifndef _SCOUTFS_TOTL_H_
|
||||
#define _SCOUTFS_TOTL_H_
|
||||
|
||||
#include "key.h"
|
||||
|
||||
struct scoutfs_totl_merging {
|
||||
u64 fs_seq;
|
||||
u64 fs_total;
|
||||
u64 fs_count;
|
||||
u64 fin_seq;
|
||||
u64 fin_total;
|
||||
s64 fin_count;
|
||||
u64 log_seq;
|
||||
u64 log_total;
|
||||
s64 log_count;
|
||||
};
|
||||
|
||||
void scoutfs_totl_set_range(struct scoutfs_key *start, struct scoutfs_key *end);
|
||||
void scoutfs_totl_merge_init(struct scoutfs_totl_merging *merg);
|
||||
void scoutfs_totl_merge_contribute(struct scoutfs_totl_merging *merg,
|
||||
u64 seq, u8 flags, void *val, int val_len, int fic);
|
||||
void scoutfs_totl_merge_resolve(struct scoutfs_totl_merging *merg, __u64 *total, __u64 *count);
|
||||
|
||||
#endif
|
||||
143
kmod/src/trace/quota.h
Normal file
@@ -0,0 +1,143 @@
|
||||
|
||||
/*
|
||||
* Tracing squota_input
|
||||
*/
|
||||
#define SQI_FMT "[%u %llu %llu %llu]"
|
||||
|
||||
#define SQI_ARGS(i) \
|
||||
(i)->op, (i)->attrs[0], (i)->attrs[1], (i)->attrs[2]
|
||||
|
||||
#define SQI_FIELDS(pref) \
|
||||
__array(__u64, pref##_attrs, SQ_NS__NR_SELECT) \
|
||||
__field(__u8, pref##_op)
|
||||
|
||||
#define SQI_ASSIGN(pref, i) \
|
||||
__entry->pref##_attrs[0] = (i)->attrs[0]; \
|
||||
__entry->pref##_attrs[1] = (i)->attrs[1]; \
|
||||
__entry->pref##_attrs[2] = (i)->attrs[2]; \
|
||||
__entry->pref##_op = (i)->op;
|
||||
|
||||
#define SQI_ENTRY_ARGS(pref) \
|
||||
__entry->pref##_op, __entry->pref##_attrs[0], \
|
||||
__entry->pref##_attrs[1], __entry->pref##_attrs[2]
|
||||
|
||||
/*
|
||||
* Tracing squota_rule
|
||||
*/
|
||||
#define SQR_FMT "[%u %llu,%u,%x %llu,%u,%x %llu,%u,%x %u %llu]"
|
||||
|
||||
#define SQR_ARGS(r) \
|
||||
(r)->prio, \
|
||||
(r)->name_val[0], (r)->name_source[0], (r)->name_flags[0], \
|
||||
(r)->name_val[1], (r)->name_source[1], (r)->name_flags[1], \
|
||||
(r)->name_val[2], (r)->name_source[2], (r)->name_flags[2], \
|
||||
(r)->op, (r)->limit \
|
||||
|
||||
#define SQR_FIELDS(pref) \
|
||||
__array(__u64, pref##_name_val, 3) \
|
||||
__field(__u64, pref##_limit) \
|
||||
__array(__u8, pref##_name_source, 3) \
|
||||
__array(__u8, pref##_name_flags, 3) \
|
||||
__field(__u8, pref##_prio) \
|
||||
__field(__u8, pref##_op)
|
||||
|
||||
#define SQR_ASSIGN(pref, r) \
|
||||
__entry->pref##_name_val[0] = (r)->names[0].val; \
|
||||
__entry->pref##_name_val[1] = (r)->names[1].val; \
|
||||
__entry->pref##_name_val[2] = (r)->names[2].val; \
|
||||
__entry->pref##_limit = (r)->limit; \
|
||||
__entry->pref##_name_source[0] = (r)->names[0].source; \
|
||||
__entry->pref##_name_source[1] = (r)->names[1].source; \
|
||||
__entry->pref##_name_source[2] = (r)->names[2].source; \
|
||||
__entry->pref##_name_flags[0] = (r)->names[0].flags; \
|
||||
__entry->pref##_name_flags[1] = (r)->names[1].flags; \
|
||||
__entry->pref##_name_flags[2] = (r)->names[2].flags; \
|
||||
__entry->pref##_prio = (r)->prio; \
|
||||
__entry->pref##_op = (r)->op;
|
||||
|
||||
#define SQR_ENTRY_ARGS(pref) \
|
||||
__entry->pref##_prio, __entry->pref##_name_val[0], \
|
||||
__entry->pref##_name_source[0], __entry->pref##_name_flags[0], \
|
||||
__entry->pref##_name_val[1], __entry->pref##_name_source[1], \
|
||||
__entry->pref##_name_flags[1], __entry->pref##_name_val[2], \
|
||||
__entry->pref##_name_source[2], __entry->pref##_name_flags[2], \
|
||||
__entry->pref##_op, __entry->pref##_limit
|
||||
|
||||
TRACE_EVENT(scoutfs_quota_check,
|
||||
TP_PROTO(struct super_block *sb, long rs_ptr, struct squota_input *inp, int ret),
|
||||
|
||||
TP_ARGS(sb, rs_ptr, inp, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(long, rs_ptr)
|
||||
SQI_FIELDS(i)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->rs_ptr = rs_ptr;
|
||||
SQI_ASSIGN(i, inp);
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" rs_ptr %ld ret %d inp "SQI_FMT,
|
||||
SCSB_TRACE_ARGS, __entry->rs_ptr, __entry->ret, SQI_ENTRY_ARGS(i))
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_quota_rule_op_class,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
|
||||
TP_ARGS(sb, rule, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
SQR_FIELDS(r)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
SQR_ASSIGN(r, rule);
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" "SQR_FMT" ret %d",
|
||||
SCSB_TRACE_ARGS, SQR_ENTRY_ARGS(r), __entry->ret)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_add_rule,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
TP_ARGS(sb, rule, ret)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_quota_rule_op_class, scoutfs_quota_del_rule,
|
||||
TP_PROTO(struct super_block *sb, struct squota_rule *rule, int ret),
|
||||
TP_ARGS(sb, rule, ret)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_quota_totl_check,
|
||||
TP_PROTO(struct super_block *sb, struct squota_input *inp, struct scoutfs_key *key,
|
||||
u64 limit, int ret),
|
||||
|
||||
TP_ARGS(sb, inp, key, limit, ret),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
SQI_FIELDS(i)
|
||||
sk_trace_define(k)
|
||||
__field(__u64, limit)
|
||||
__field(int, ret)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
SQI_ASSIGN(i, inp);
|
||||
sk_trace_assign(k, key);
|
||||
__entry->limit = limit;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" inp "SQI_FMT" key "SK_FMT" limit %llu ret %d",
|
||||
SCSB_TRACE_ARGS, SQI_ENTRY_ARGS(i), sk_trace_args(k), __entry->limit,
|
||||
__entry->ret)
|
||||
);
|
||||
112
kmod/src/trace/wkic.h
Normal file
@@ -0,0 +1,112 @@
|
||||
|
||||
DECLARE_EVENT_CLASS(scoutfs_wkic_wpage_class,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
__field(void *, ptr)
|
||||
__field(int, which)
|
||||
__field(bool, n0l)
|
||||
__field(bool, n1l)
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
__entry->ptr = ptr;
|
||||
__entry->which = which;
|
||||
__entry->n0l = n0l;
|
||||
__entry->n1l = n1l;
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" ptr %p wh %d nl %u,%u start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
|
||||
__entry->ptr, __entry->which, __entry->n0l, __entry->n1l,
|
||||
sk_trace_args(start), sk_trace_args(end))
|
||||
);
|
||||
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_alloced,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_freeing,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_found,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_trimmed,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_erased,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserting,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_inserted,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_shrinking,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_dropping,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_replaying,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
DEFINE_EVENT(scoutfs_wkic_wpage_class, scoutfs_wkic_wpage_filled,
|
||||
TP_PROTO(struct super_block *sb, void *ptr, int which, bool n0l, bool n1l,
|
||||
struct scoutfs_key *start, struct scoutfs_key *end),
|
||||
TP_ARGS(sb, ptr, which, n0l, n1l, start, end)
|
||||
);
|
||||
|
||||
TRACE_EVENT(scoutfs_wkic_read_items,
|
||||
TP_PROTO(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *start,
|
||||
struct scoutfs_key *end),
|
||||
|
||||
TP_ARGS(sb, key, start, end),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
SCSB_TRACE_FIELDS
|
||||
sk_trace_define(key)
|
||||
sk_trace_define(start)
|
||||
sk_trace_define(end)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
SCSB_TRACE_ASSIGN(sb);
|
||||
sk_trace_assign(key, key);
|
||||
sk_trace_assign(start, start);
|
||||
sk_trace_assign(end, end);
|
||||
),
|
||||
|
||||
TP_printk(SCSBF" key "SK_FMT" start "SK_FMT " end "SK_FMT, SCSB_TRACE_ARGS,
|
||||
sk_trace_args(key), sk_trace_args(start), sk_trace_args(end))
|
||||
);
|
||||
367
kmod/src/trans.c
@@ -17,6 +17,7 @@
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "trans.h"
|
||||
@@ -53,15 +54,24 @@
|
||||
/* sync dirty data at least this often */
|
||||
#define TRANS_SYNC_DELAY (HZ * 10)
|
||||
|
||||
/*
|
||||
* XXX move the rest of the super trans_ fields here.
|
||||
*/
|
||||
struct trans_info {
|
||||
struct super_block *sb;
|
||||
|
||||
atomic_t holders;
|
||||
|
||||
struct scoutfs_log_trees lt;
|
||||
struct scoutfs_alloc alloc;
|
||||
struct scoutfs_block_writer wri;
|
||||
|
||||
wait_queue_head_t hold_wq;
|
||||
struct task_struct *task;
|
||||
spinlock_t write_lock;
|
||||
u64 write_count;
|
||||
int write_ret;
|
||||
struct delayed_work write_work;
|
||||
wait_queue_head_t write_wq;
|
||||
struct workqueue_struct *write_workq;
|
||||
bool deadline_expired;
|
||||
};
|
||||
|
||||
#define DECLARE_TRANS_INFO(sb, name) \
|
||||
@@ -91,6 +101,7 @@ static int commit_btrees(struct super_block *sb)
|
||||
*/
|
||||
int scoutfs_trans_get_log_trees(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
struct scoutfs_log_trees lt;
|
||||
int ret = 0;
|
||||
@@ -103,6 +114,11 @@ int scoutfs_trans_get_log_trees(struct super_block *sb)
|
||||
|
||||
scoutfs_forest_init_btrees(sb, &tri->alloc, &tri->wri, <);
|
||||
scoutfs_data_init_btrees(sb, &tri->alloc, &tri->wri, <);
|
||||
|
||||
/* first set during mount from 0 to nonzero allows commits */
|
||||
spin_lock(&tri->write_lock);
|
||||
sbi->trans_seq = le64_to_cpu(lt.get_trans_seq);
|
||||
spin_unlock(&tri->write_lock);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@@ -120,13 +136,12 @@ bool scoutfs_trans_has_dirty(struct super_block *sb)
|
||||
*/
|
||||
static void sub_holders_and_wake(struct super_block *sb, int val)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
atomic_sub(val, &tri->holders);
|
||||
smp_mb(); /* make sure sub is visible before we wake */
|
||||
if (waitqueue_active(&sbi->trans_hold_wq))
|
||||
wake_up(&sbi->trans_hold_wq);
|
||||
if (waitqueue_active(&tri->hold_wq))
|
||||
wake_up(&tri->hold_wq);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -144,6 +159,58 @@ static bool drained_holders(struct trans_info *tri)
|
||||
return holders == 0;
|
||||
}
|
||||
|
||||
static int commit_current_log_trees(struct super_block *sb, char **str)
|
||||
{
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
return (*str = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
|
||||
(*str = "item dirty", scoutfs_item_write_dirty(sb)) ?:
|
||||
(*str = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
|
||||
(*str = "alloc prepare", scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri)) ?:
|
||||
(*str = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
|
||||
(*str = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
|
||||
(*str = "commit log trees", commit_btrees(sb)) ?:
|
||||
scoutfs_item_write_done(sb);
|
||||
}
|
||||
|
||||
static int get_next_log_trees(struct super_block *sb, char **str)
|
||||
{
|
||||
return (*str = "get log trees", scoutfs_trans_get_log_trees(sb));
|
||||
}
|
||||
|
||||
static int retry_forever(struct super_block *sb, int (*func)(struct super_block *sb, char **str))
|
||||
{
|
||||
bool retrying = false;
|
||||
char *str;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
str = NULL;
|
||||
|
||||
ret = func(sb, &str);
|
||||
if (ret < 0) {
|
||||
if (!retrying) {
|
||||
scoutfs_warn(sb, "critical transaction commit failure: %s = %d, retrying",
|
||||
str, ret);
|
||||
retrying = true;
|
||||
}
|
||||
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
msleep(2 * MSEC_PER_SEC);
|
||||
|
||||
} else if (retrying) {
|
||||
scoutfs_info(sb, "retried transaction commit succeeded");
|
||||
}
|
||||
|
||||
} while (ret < 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This work func is responsible for writing out all the dirty blocks
|
||||
* that make up the current dirty transaction. It prevents writers from
|
||||
@@ -154,90 +221,63 @@ static bool drained_holders(struct trans_info *tri)
|
||||
* functions that would try to hold the transaction. We record the task
* that is committing the transaction so that holding won't deadlock.
|
||||
*
|
||||
* Any dirty block had to have allocated a new blkno which would have
|
||||
* created dirty allocator metadata blocks. We can avoid writing
|
||||
* entirely if we don't have any dirty metadata blocks. This is
|
||||
* important because we don't try to serialize this work during
|
||||
* unmount.. we can execute as the vfs is shutting down.. we need to
|
||||
* decide that nothing is dirty without calling the vfs at all.
|
||||
* Once we clear the write func bit in holders then waiting holders can
|
||||
* enter the transaction and continue modifying the transaction. Once
|
||||
* we start writing we consider the transaction done and won't exit,
|
||||
* clearing the write func bit, until get_log_trees has opened the next
|
||||
* transaction. The exception is forced unmount which is allowed to
|
||||
* generate errors and throw away data.
|
||||
*
|
||||
* We first try to sync the dirty inodes and write their dirty data blocks,
|
||||
* then we write all our dirty metadata blocks, and only when those succeed
|
||||
* do we write the new super that references all of these newly written blocks.
|
||||
*
|
||||
* If there are write errors then blocks are kept dirty in memory and will
|
||||
* be written again at the next sync.
|
||||
* This means that the only way fsync can return an error is if we're in
|
||||
* forced unmount.
|
||||
*/
|
||||
void scoutfs_trans_write_func(struct work_struct *work)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = container_of(work, struct scoutfs_sb_info,
|
||||
trans_write_work.work);
|
||||
struct super_block *sb = sbi->sb;
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
u64 trans_seq = sbi->trans_seq;
|
||||
char *s = NULL;
|
||||
struct trans_info *tri = container_of(work, struct trans_info, write_work.work);
|
||||
struct super_block *sb = tri->sb;
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
int ret = 0;
|
||||
|
||||
sbi->trans_task = current;
|
||||
tri->task = current;
|
||||
|
||||
/* mark that we're writing so holders wait for us to finish and clear our bit */
|
||||
atomic_add(TRANS_HOLDERS_WRITE_FUNC_BIT, &tri->holders);
|
||||
|
||||
wait_event(sbi->trans_hold_wq, drained_holders(tri));
|
||||
wait_event(tri->hold_wq, drained_holders(tri));
|
||||
|
||||
trace_scoutfs_trans_write_func(sb,
|
||||
scoutfs_block_writer_dirty_bytes(sb, &tri->wri));
|
||||
|
||||
if (!scoutfs_block_writer_has_dirty(sb, &tri->wri) &&
|
||||
!scoutfs_item_dirty_pages(sb)) {
|
||||
if (sbi->trans_deadline_expired) {
|
||||
/*
|
||||
* If we're not writing data then we only advance the
|
||||
* seq at the sync deadline interval. This keeps idle
|
||||
* mounts from pinning a seq and stopping readers of the
|
||||
* seq indices but doesn't send a message for every sync
|
||||
* syscall.
|
||||
*/
|
||||
ret = scoutfs_client_advance_seq(sb, &trans_seq);
|
||||
if (ret < 0)
|
||||
s = "clean advance seq";
|
||||
}
|
||||
/* mount hasn't opened first transaction yet, still complete sync */
|
||||
if (sbi->trans_seq == 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (sbi->trans_deadline_expired)
|
||||
if (scoutfs_forcing_unmount(sb)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
trace_scoutfs_trans_write_func(sb, scoutfs_block_writer_dirty_bytes(sb, &tri->wri),
|
||||
scoutfs_item_dirty_pages(sb));
|
||||
|
||||
if (tri->deadline_expired)
|
||||
scoutfs_inc_counter(sb, trans_commit_timer);
|
||||
|
||||
scoutfs_inc_counter(sb, trans_commit_written);
|
||||
|
||||
/* XXX this all needs serious work for dealing with errors */
|
||||
ret = (s = "data submit", scoutfs_inode_walk_writeback(sb, true)) ?:
|
||||
(s = "item dirty", scoutfs_item_write_dirty(sb)) ?:
|
||||
(s = "data prepare", scoutfs_data_prepare_commit(sb)) ?:
|
||||
(s = "alloc prepare", scoutfs_alloc_prepare_commit(sb,
|
||||
&tri->alloc, &tri->wri)) ?:
|
||||
(s = "meta write", scoutfs_block_writer_write(sb, &tri->wri)) ?:
|
||||
(s = "data wait", scoutfs_inode_walk_writeback(sb, false)) ?:
|
||||
(s = "commit log trees", commit_btrees(sb)) ?:
|
||||
scoutfs_item_write_done(sb) ?:
|
||||
(s = "advance seq", scoutfs_client_advance_seq(sb, &trans_seq)) ?:
|
||||
(s = "get log trees", scoutfs_trans_get_log_trees(sb));
|
||||
/* retry {commit,get}_log_trees until they succeed, can only fail when forcing unmount */
|
||||
ret = retry_forever(sb, commit_current_log_trees) ?:
|
||||
retry_forever(sb, get_next_log_trees);
|
||||
out:
|
||||
if (ret < 0)
|
||||
scoutfs_err(sb, "critical transaction commit failure: %s, %d",
|
||||
s, ret);
|
||||
|
||||
spin_lock(&sbi->trans_write_lock);
|
||||
sbi->trans_write_count++;
|
||||
sbi->trans_write_ret = ret;
|
||||
sbi->trans_seq = trans_seq;
|
||||
spin_unlock(&sbi->trans_write_lock);
|
||||
wake_up(&sbi->trans_write_wq);
|
||||
spin_lock(&tri->write_lock);
|
||||
tri->write_count++;
|
||||
tri->write_ret = ret;
|
||||
spin_unlock(&tri->write_lock);
|
||||
wake_up(&tri->write_wq);
|
||||
|
||||
/* we're done, wake waiting holders */
|
||||
sub_holders_and_wake(sb, TRANS_HOLDERS_WRITE_FUNC_BIT);
|
||||
|
||||
sbi->trans_task = NULL;
|
||||
tri->task = NULL;
|
||||
|
||||
scoutfs_trans_restart_sync_deadline(sb);
|
||||
}
|
||||
@@ -248,17 +288,17 @@ struct write_attempt {
|
||||
};
|
||||
|
||||
/* this is called as a wait_event() condition so it can't change task state */
|
||||
static int write_attempted(struct scoutfs_sb_info *sbi,
|
||||
struct write_attempt *attempt)
|
||||
static int write_attempted(struct super_block *sb, struct write_attempt *attempt)
|
||||
{
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
int done = 1;
|
||||
|
||||
spin_lock(&sbi->trans_write_lock);
|
||||
if (sbi->trans_write_count > attempt->count)
|
||||
attempt->ret = sbi->trans_write_ret;
|
||||
spin_lock(&tri->write_lock);
|
||||
if (tri->write_count > attempt->count)
|
||||
attempt->ret = tri->write_ret;
|
||||
else
|
||||
done = 0;
|
||||
spin_unlock(&sbi->trans_write_lock);
|
||||
spin_unlock(&tri->write_lock);
|
||||
|
||||
return done;
|
||||
}
|
||||
@@ -268,10 +308,12 @@ static int write_attempted(struct scoutfs_sb_info *sbi,
|
||||
* We always have delayed sync work pending but the caller wants it
|
||||
* to execute immediately.
|
||||
*/
|
||||
static void queue_trans_work(struct scoutfs_sb_info *sbi)
|
||||
static void queue_trans_work(struct super_block *sb)
|
||||
{
|
||||
sbi->trans_deadline_expired = false;
|
||||
mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work, 0);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
tri->deadline_expired = false;
|
||||
mod_delayed_work(tri->write_workq, &tri->write_work, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -284,26 +326,24 @@ static void queue_trans_work(struct scoutfs_sb_info *sbi)
|
||||
*/
|
||||
int scoutfs_trans_sync(struct super_block *sb, int wait)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct write_attempt attempt;
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
struct write_attempt attempt = { .ret = 0 };
|
||||
int ret;
|
||||
|
||||
|
||||
if (!wait) {
|
||||
queue_trans_work(sbi);
|
||||
queue_trans_work(sb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
spin_lock(&sbi->trans_write_lock);
|
||||
attempt.count = sbi->trans_write_count;
|
||||
spin_unlock(&sbi->trans_write_lock);
|
||||
spin_lock(&tri->write_lock);
|
||||
attempt.count = tri->write_count;
|
||||
spin_unlock(&tri->write_lock);
|
||||
|
||||
queue_trans_work(sbi);
|
||||
queue_trans_work(sb);
|
||||
|
||||
ret = wait_event_interruptible(sbi->trans_write_wq,
|
||||
write_attempted(sbi, &attempt));
|
||||
if (ret == 0)
|
||||
ret = attempt.ret;
|
||||
wait_event(tri->write_wq, write_attempted(sb, &attempt));
|
||||
ret = attempt.ret;
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -319,10 +359,10 @@ int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
|
||||
|
||||
void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
sbi->trans_deadline_expired = true;
|
||||
mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work,
|
||||
tri->deadline_expired = true;
|
||||
mod_delayed_work(tri->write_workq, &tri->write_work,
|
||||
TRANS_SYNC_DELAY);
|
||||
}
|
||||
|
||||
@@ -430,8 +470,8 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Try to refill data allocator before premature enospc */
|
||||
if (scoutfs_data_alloc_free_bytes(sb) <= SCOUTFS_TRANS_DATA_ALLOC_LWM) {
|
||||
/* if we're low and can't refill then alloc could empty and return enospc */
|
||||
if (scoutfs_data_alloc_should_refill(sb, SCOUTFS_ALLOC_DATA_REFILL_THRESH)) {
|
||||
scoutfs_inc_counter(sb, trans_commit_data_alloc_low);
|
||||
return true;
|
||||
}
|
||||
@@ -439,38 +479,15 @@ static bool commit_before_hold(struct super_block *sb, struct trans_info *tri)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool acquired_hold(struct super_block *sb)
|
||||
/*
|
||||
* called as a wait_event condition, needs to be careful to not change
|
||||
* task state and is racing with waking paths that sub_return, test, and
|
||||
* wake.
|
||||
*/
|
||||
static bool holders_no_writer(struct trans_info *tri)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
bool acquired;
|
||||
|
||||
/* if a caller already has a hold we acquire unconditionally */
|
||||
if (inc_journal_info_holders()) {
|
||||
atomic_inc(&tri->holders);
|
||||
acquired = true;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* wait if the writer is blocking holds */
|
||||
if (!inc_holders_unless_writer(tri)) {
|
||||
dec_journal_info_holders();
|
||||
acquired = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* wait if we're triggering another commit */
|
||||
if (commit_before_hold(sb, tri)) {
|
||||
release_holders(sb);
|
||||
queue_trans_work(sbi);
|
||||
acquired = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
trace_scoutfs_trans_acquired_hold(sb, current->journal_info, atomic_read(&tri->holders));
|
||||
acquired = true;
|
||||
out:
|
||||
return acquired;
|
||||
smp_mb(); /* make sure task in wait_event queue before atomic read */
|
||||
return !(atomic_read(&tri->holders) & TRANS_HOLDERS_WRITE_FUNC_BIT);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -486,15 +503,65 @@ out:
|
||||
* The writing thread marks itself as a global trans_task which
|
||||
* short-circuits all the hold machinery so it can call code that would
|
||||
* otherwise try to hold transactions while it is writing.
|
||||
*
|
||||
* If the caller is adding metadata items that will eventually consume
|
||||
* free space -- not dirtying existing items or adding deletion items --
|
||||
* then we can return enospc if our metadata allocator indicates that
|
||||
* we're low on space.
|
||||
*/
|
||||
int scoutfs_hold_trans(struct super_block *sb)
|
||||
int scoutfs_hold_trans(struct super_block *sb, bool allocing)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
u64 seq;
|
||||
int ret;
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
if (current == tri->task)
|
||||
return 0;
|
||||
|
||||
return wait_event_interruptible(sbi->trans_hold_wq, acquired_hold(sb));
|
||||
for (;;) {
|
||||
/* shouldn't get holders until mount finishes, (not locking for cheap test) */
|
||||
if (WARN_ON_ONCE(sbi->trans_seq == 0)) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
/* if a caller already has a hold we acquire unconditionally */
|
||||
if (inc_journal_info_holders()) {
|
||||
atomic_inc(&tri->holders);
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* wait until the writer work is finished */
|
||||
if (!inc_holders_unless_writer(tri)) {
|
||||
dec_journal_info_holders();
|
||||
wait_event(tri->hold_wq, holders_no_writer(tri));
|
||||
continue;
|
||||
}
|
||||
|
||||
/* return enospc if server is into reserved blocks and we're allocating */
|
||||
if (allocing && scoutfs_alloc_test_flag(sb, &tri->alloc, SCOUTFS_ALLOC_FLAG_LOW)) {
|
||||
release_holders(sb);
|
||||
ret = -ENOSPC;
|
||||
break;
|
||||
}
|
||||
|
||||
/* see if we need to trigger and wait for a commit before holding */
|
||||
if (commit_before_hold(sb, tri)) {
|
||||
seq = scoutfs_trans_sample_seq(sb);
|
||||
release_holders(sb);
|
||||
queue_trans_work(sb);
|
||||
wait_event(tri->hold_wq, scoutfs_trans_sample_seq(sb) != seq);
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
trace_scoutfs_hold_trans(sb, current->journal_info, atomic_read(&tri->holders), ret);
|
||||
return ret;
|
||||
}
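A hedged sketch of the caller pattern that the new allocing argument implies; modify_items() is a hypothetical helper standing in for whatever work dirties items inside the held transaction:

static int example_modify(struct super_block *sb)
{
	int ret;

	/* allocing = true: a low metadata allocator surfaces here as -ENOSPC */
	ret = scoutfs_hold_trans(sb, true);
	if (ret)
		return ret;

	ret = modify_items(sb);

	scoutfs_release_trans(sb);
	return ret;
}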
|
||||
|
||||
/*
|
||||
@@ -511,15 +578,14 @@ bool scoutfs_trans_held(void)
|
||||
|
||||
void scoutfs_release_trans(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
if (current == sbi->trans_task)
|
||||
if (current == tri->task)
|
||||
return;
|
||||
|
||||
release_holders(sb);
|
||||
|
||||
trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders));
|
||||
trace_scoutfs_release_trans(sb, current->journal_info, atomic_read(&tri->holders), 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -529,12 +595,13 @@ void scoutfs_release_trans(struct super_block *sb)
|
||||
*/
|
||||
u64 scoutfs_trans_sample_seq(struct super_block *sb)
|
||||
{
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
u64 ret;
|
||||
|
||||
spin_lock(&sbi->trans_write_lock);
|
||||
spin_lock(&tri->write_lock);
|
||||
ret = sbi->trans_seq;
|
||||
spin_unlock(&sbi->trans_write_lock);
|
||||
spin_unlock(&tri->write_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -548,12 +615,17 @@ int scoutfs_setup_trans(struct super_block *sb)
|
||||
if (!tri)
|
||||
return -ENOMEM;
|
||||
|
||||
tri->sb = sb;
|
||||
atomic_set(&tri->holders, 0);
|
||||
scoutfs_block_writer_init(sb, &tri->wri);
|
||||
|
||||
sbi->trans_write_workq = alloc_workqueue("scoutfs_trans",
|
||||
WQ_UNBOUND, 1);
|
||||
if (!sbi->trans_write_workq) {
|
||||
spin_lock_init(&tri->write_lock);
|
||||
INIT_DELAYED_WORK(&tri->write_work, scoutfs_trans_write_func);
|
||||
init_waitqueue_head(&tri->write_wq);
|
||||
init_waitqueue_head(&tri->hold_wq);
|
||||
|
||||
tri->write_workq = alloc_workqueue("scoutfs_trans", WQ_UNBOUND, 1);
|
||||
if (!tri->write_workq) {
|
||||
kfree(tri);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -564,8 +636,15 @@ int scoutfs_setup_trans(struct super_block *sb)
|
||||
}
|
||||
|
||||
/*
|
||||
* kill_sb calls sync before getting here so we know that dirty data
|
||||
* should be in flight. We just have to wait for it to quiesce.
|
||||
* While the vfs will have done an fs level sync before calling
|
||||
* put_super, we may have done work down in our level after all the fs
|
||||
* ops were done. An example is final inode deletion in iput, which is
* done in generic_shutdown_super after the sync and before calling our
|
||||
* put_super.
|
||||
*
|
||||
* So we always try to write any remaining dirty transactions before
|
||||
* shutting down. Typically there won't be any dirty data and the
|
||||
* worker will just return.
|
||||
*/
|
||||
void scoutfs_shutdown_trans(struct super_block *sb)
|
||||
{
|
||||
@@ -573,13 +652,19 @@ void scoutfs_shutdown_trans(struct super_block *sb)
|
||||
DECLARE_TRANS_INFO(sb, tri);
|
||||
|
||||
if (tri) {
|
||||
scoutfs_block_writer_forget_all(sb, &tri->wri);
|
||||
if (sbi->trans_write_workq) {
|
||||
cancel_delayed_work_sync(&sbi->trans_write_work);
|
||||
destroy_workqueue(sbi->trans_write_workq);
|
||||
if (tri->write_workq) {
|
||||
/* immediately queues pending timer */
|
||||
flush_delayed_work(&tri->write_work);
|
||||
/* prevents re-arming if it has to wait */
|
||||
cancel_delayed_work_sync(&tri->write_work);
|
||||
destroy_workqueue(tri->write_workq);
|
||||
/* trans work schedules after shutdown see null */
|
||||
sbi->trans_write_workq = NULL;
|
||||
tri->write_workq = NULL;
|
||||
}
|
||||
|
||||
scoutfs_alloc_prepare_commit(sb, &tri->alloc, &tri->wri);
|
||||
scoutfs_block_writer_forget_all(sb, &tri->wri);
|
||||
|
||||
kfree(tri);
|
||||
sbi->trans_info = NULL;
|
||||
}
|
||||
|
||||
@@ -1,18 +1,13 @@
|
||||
#ifndef _SCOUTFS_TRANS_H_
|
||||
#define _SCOUTFS_TRANS_H_
|
||||
|
||||
/* the server will attempt to fill data allocs for each trans */
|
||||
#define SCOUTFS_TRANS_DATA_ALLOC_HWM (2ULL * 1024 * 1024 * 1024)
|
||||
/* the client will force commits if data allocators get too low */
|
||||
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)
|
||||
|
||||
void scoutfs_trans_write_func(struct work_struct *work);
|
||||
int scoutfs_trans_sync(struct super_block *sb, int wait);
|
||||
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
|
||||
int datasync);
|
||||
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);
|
||||
|
||||
int scoutfs_hold_trans(struct super_block *sb);
|
||||
int scoutfs_hold_trans(struct super_block *sb, bool allocing);
|
||||
bool scoutfs_trans_held(void);
|
||||
void scoutfs_release_trans(struct super_block *sb);
|
||||
u64 scoutfs_trans_sample_seq(struct super_block *sb);
|
||||
|
||||
@@ -39,6 +39,9 @@ struct scoutfs_triggers {
|
||||
|
||||
static char *names[] = {
|
||||
[SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE] = "block_remove_stale",
|
||||
[SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE] = "srch_compact_logs_pad_safe",
|
||||
[SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE] = "srch_force_log_rotate",
|
||||
[SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE] = "srch_merge_stop_safe",
|
||||
[SCOUTFS_TRIGGER_STATFS_LOCK_PURGE] = "statfs_lock_purge",
|
||||
};
|
||||
|
||||
@@ -90,13 +93,9 @@ int scoutfs_setup_triggers(struct super_block *sb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++) {
|
||||
if (!debugfs_create_atomic_t(names[i], 0644, triggers->dir,
|
||||
&triggers->atomics[i])) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(triggers->atomics); i++)
|
||||
debugfs_create_atomic_t(names[i], 0644, triggers->dir,
|
||||
&triggers->atomics[i]);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
|
||||
@@ -3,6 +3,9 @@
|
||||
|
||||
enum scoutfs_trigger {
|
||||
SCOUTFS_TRIGGER_BLOCK_REMOVE_STALE,
|
||||
SCOUTFS_TRIGGER_SRCH_COMPACT_LOGS_PAD_SAFE,
|
||||
SCOUTFS_TRIGGER_SRCH_FORCE_LOG_ROTATE,
|
||||
SCOUTFS_TRIGGER_SRCH_MERGE_STOP_SAFE,
|
||||
SCOUTFS_TRIGGER_STATFS_LOCK_PURGE,
|
||||
SCOUTFS_TRIGGER_NR,
|
||||
};
|
||||
|
||||
@@ -46,6 +46,23 @@ static struct scoutfs_tseq_entry *tseq_rb_next(struct scoutfs_tseq_entry *ent)
|
||||
return rb_entry(node, struct scoutfs_tseq_entry, node);
|
||||
}
|
||||
|
||||
#ifdef KC_RB_TREE_AUGMENTED_COMPUTE_MAX
|
||||
static bool tseq_compute_total(struct scoutfs_tseq_entry *ent, bool exit)
|
||||
{
|
||||
loff_t total = 1 + tseq_node_total(ent->node.rb_left) +
|
||||
tseq_node_total(ent->node.rb_right);
|
||||
|
||||
if (exit && ent->total == total)
|
||||
return true;
|
||||
|
||||
ent->total = total;
|
||||
return false;
|
||||
}
|
||||
|
||||
RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
|
||||
node, total, tseq_compute_total);
|
||||
#else
|
||||
|
||||
static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
|
||||
{
|
||||
return 1 + tseq_node_total(ent->node.rb_left) +
|
||||
@@ -53,7 +70,8 @@ static loff_t tseq_compute_total(struct scoutfs_tseq_entry *ent)
|
||||
}
|
||||
|
||||
RB_DECLARE_CALLBACKS(static, tseq_rb_callbacks, struct scoutfs_tseq_entry,
|
||||
node, loff_t, total, tseq_compute_total)
|
||||
node, loff_t, total, tseq_compute_total);
|
||||
#endif
|
||||
|
||||
void scoutfs_tseq_tree_init(struct scoutfs_tseq_tree *tree,
|
||||
scoutfs_tseq_show_t show)
|
||||
@@ -165,6 +183,13 @@ static void *scoutfs_tseq_seq_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
ent = tseq_rb_next(ent);
|
||||
if (ent)
|
||||
*pos = ent->pos;
|
||||
else
|
||||
/*
|
||||
* once we hit the end, *pos is never used, but it has to
|
||||
* be updated to avoid an error in bpf_seq_read()
|
||||
*/
|
||||
(*pos)++;
|
||||
|
||||
return ent;
|
||||
}
|
||||
|
||||
|
||||
@@ -17,4 +17,15 @@ static inline void down_write_two(struct rw_semaphore *a,
|
||||
down_write_nested(b, SINGLE_DEPTH_NESTING);
|
||||
}
|
||||
|
||||
/*
|
||||
* When returning shrinker counts from scan_objects, we should steer
|
||||
* clear of the magic SHRINK_STOP and SHRINK_EMPTY values, which are near
|
||||
* ~0UL values. Hence, we cap count at LONG_MAX, which is high enough
* for any realistic count while staying clear of them.
|
||||
*/
|
||||
static inline long shrinker_min_long(long count)
|
||||
{
|
||||
return min(count, LONG_MAX);
|
||||
}
|
||||
|
||||
#endif
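As a small illustration of where the helper above is meant to be used (not from the patch), a scan_objects callback might clamp its return like this; example_shrink_caches() is a hypothetical stand-in:

static unsigned long example_scan_objects(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	/* hypothetical cache reclaim that returns the number freed */
	long freed = example_shrink_caches(sc->nr_to_scan);

	/* keep the return away from the SHRINK_STOP/SHRINK_EMPTY magic values */
	return shrinker_min_long(freed);
}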
|
||||
|
||||
188
kmod/src/volopt.c
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Versity Software, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/sysfs.h>
|
||||
|
||||
#include "super.h"
|
||||
#include "client.h"
|
||||
#include "volopt.h"
|
||||
|
||||
/*
|
||||
* Volume options are exposed through a sysfs directory. Getting and
* setting the values sends RPCs to the server that owns the options in
* the super block.
|
||||
*/
|
||||
|
||||
struct volopt_info {
|
||||
struct super_block *sb;
|
||||
struct scoutfs_sysfs_attrs ssa;
|
||||
};
|
||||
|
||||
#define DECLARE_VOLOPT_INFO(sb, name) \
|
||||
struct volopt_info *name = SCOUTFS_SB(sb)->volopt_info
|
||||
#define DECLARE_VOLOPT_INFO_KOBJ(kobj, name) \
|
||||
DECLARE_VOLOPT_INFO(SCOUTFS_SYSFS_ATTRS_SB(kobj), name)
|
||||
|
||||
/*
|
||||
* attribute arrays need to be dense but the options we export could
* well become sparse over time. .show and .store are generic and we
* have a lookup table to map the attribute array indexes to the number
* and name of the option.
|
||||
*/
|
||||
static struct volopt_nr_name {
|
||||
int nr;
|
||||
char *name;
|
||||
} volopt_table[] = {
|
||||
{ SCOUTFS_VOLOPT_DATA_ALLOC_ZONE_BLOCKS_NR, "data_alloc_zone_blocks" },
|
||||
};
|
||||
|
||||
/* initialized by setup, pointer array is null terminated */
|
||||
static struct kobj_attribute volopt_attrs[ARRAY_SIZE(volopt_table)];
|
||||
static struct attribute *volopt_attr_ptrs[ARRAY_SIZE(volopt_table) + 1];
|
||||
|
||||
static void get_opt_data(struct kobj_attribute *attr, struct scoutfs_volume_options *volopt,
|
||||
u64 *bit, __le64 **opt)
|
||||
{
|
||||
size_t index = attr - &volopt_attrs[0];
|
||||
int nr = volopt_table[index].nr;
|
||||
|
||||
*bit = 1ULL << nr;
|
||||
*opt = &volopt->set_bits + 1 + nr;
|
||||
}
|
||||
|
||||
static ssize_t volopt_attr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
|
||||
struct super_block *sb = vinf->sb;
|
||||
struct scoutfs_volume_options volopt;
|
||||
__le64 *opt;
|
||||
u64 bit;
|
||||
int ret;
|
||||
|
||||
ret = scoutfs_client_get_volopt(sb, &volopt);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
get_opt_data(attr, &volopt, &bit, &opt);
|
||||
|
||||
if (le64_to_cpu(volopt.set_bits) & bit) {
|
||||
return snprintf(buf, PAGE_SIZE, "%llu", le64_to_cpup(opt));
|
||||
} else {
|
||||
buf[0] = '\0';
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t volopt_attr_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
DECLARE_VOLOPT_INFO_KOBJ(kobj, vinf);
|
||||
struct super_block *sb = vinf->sb;
|
||||
struct scoutfs_volume_options volopt = {0,};
|
||||
u8 chars[32];
|
||||
__le64 *opt;
|
||||
u64 bit;
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
if (count == 0)
|
||||
return 0;
|
||||
if (count > sizeof(chars) - 1)
|
||||
return -ERANGE;
|
||||
|
||||
get_opt_data(attr, &volopt, &bit, &opt);
|
||||
|
||||
if (buf[0] == '\n' || buf[0] == '\r') {
|
||||
volopt.set_bits = cpu_to_le64(bit);
|
||||
|
||||
ret = scoutfs_client_clear_volopt(sb, &volopt);
|
||||
} else {
|
||||
memcpy(chars, buf, count);
|
||||
chars[count] = '\0';
|
||||
ret = kstrtoull(chars, 0, &val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
volopt.set_bits = cpu_to_le64(bit);
|
||||
*opt = cpu_to_le64(val);
|
||||
|
||||
ret = scoutfs_client_set_volopt(sb, &volopt);
|
||||
}
|
||||
|
||||
if (ret == 0)
|
||||
ret = count;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The volume option sysfs files are slim shims around RPCs so this
|
||||
 * should be called after the client is set up and before it is torn
|
||||
* down.
|
||||
*/
|
||||
int scoutfs_volopt_setup(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct volopt_info *vinf;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
/* persistent volume options are always a bitmap u64 then the 64 options */
|
||||
BUILD_BUG_ON(sizeof(struct scoutfs_volume_options) != (1 + 64) * 8);
|
||||
|
||||
vinf = kzalloc(sizeof(struct volopt_info), GFP_KERNEL);
|
||||
if (!vinf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
scoutfs_sysfs_init_attrs(sb, &vinf->ssa);
|
||||
vinf->sb = sb;
|
||||
sbi->volopt_info = vinf;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(volopt_table); i++) {
|
||||
volopt_attrs[i] = (struct kobj_attribute) {
|
||||
.attr = { .name = volopt_table[i].name, .mode = S_IWUSR | S_IRUGO },
|
||||
.show = volopt_attr_show,
|
||||
.store = volopt_attr_store,
|
||||
};
|
||||
volopt_attr_ptrs[i] = &volopt_attrs[i].attr;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(volopt_table) != ARRAY_SIZE(volopt_attr_ptrs) - 1);
|
||||
volopt_attr_ptrs[i] = NULL;
|
||||
|
||||
ret = scoutfs_sysfs_create_attrs(sb, &vinf->ssa, volopt_attr_ptrs, "volume_options");
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
out:
|
||||
if (ret)
|
||||
scoutfs_volopt_destroy(sb);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scoutfs_volopt_destroy(struct super_block *sb)
|
||||
{
|
||||
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
|
||||
struct volopt_info *vinf = SCOUTFS_SB(sb)->volopt_info;
|
||||
|
||||
if (vinf) {
|
||||
scoutfs_sysfs_destroy_attrs(sb, &vinf->ssa);
|
||||
kfree(vinf);
|
||||
sbi->volopt_info = NULL;
|
||||
}
|
||||
}
|
||||
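The file above wires each volopt_table entry to a file under the mount's "volume_options" sysfs directory, with get/set/clear RPCs behind the show and store handlers. A minimal admin-side sketch of exercising those files from a shell; the $ident path and the example value are illustrative, not part of this change:

opts=/sys/fs/scoutfs/$ident/volume_options   # $ident is the f.$fsid.r.$rid mount dir
cat "$opts/data_alloc_zone_blocks"           # prints nothing while the option is unset
echo 4096 > "$opts/data_alloc_zone_blocks"   # the store path sends a set_volopt rpc
echo > "$opts/data_alloc_zone_blocks"        # writing only a newline clears the option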
7
kmod/src/volopt.h
Normal file
@@ -0,0 +1,7 @@
|
||||
#ifndef _SCOUTFS_VOLOPT_H_
|
||||
#define _SCOUTFS_VOLOPT_H_
|
||||
|
||||
int scoutfs_volopt_setup(struct super_block *sb);
|
||||
void scoutfs_volopt_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
1160
kmod/src/wkic.c
Normal file
File diff suppressed because it is too large
19
kmod/src/wkic.h
Normal file
@@ -0,0 +1,19 @@
|
||||
#ifndef _SCOUTFS_WKIC_H_
|
||||
#define _SCOUTFS_WKIC_H_
|
||||
|
||||
#include "format.h"
|
||||
|
||||
typedef int (*wkic_iter_cb_t)(struct scoutfs_key *key, void *val, unsigned int val_len,
|
||||
void *cb_arg);
|
||||
|
||||
int scoutfs_wkic_iterate(struct super_block *sb, struct scoutfs_key *key, struct scoutfs_key *last,
|
||||
struct scoutfs_key *range_start, struct scoutfs_key *range_end,
|
||||
wkic_iter_cb_t cb, void *cb_arg);
|
||||
int scoutfs_wkic_iterate_stable(struct super_block *sb, struct scoutfs_key *key,
|
||||
struct scoutfs_key *last, struct scoutfs_key *range_start,
|
||||
struct scoutfs_key *range_end, wkic_iter_cb_t cb, void *cb_arg);
|
||||
|
||||
int scoutfs_wkic_setup(struct super_block *sb);
|
||||
void scoutfs_wkic_destroy(struct super_block *sb);
|
||||
|
||||
#endif
|
||||
950
kmod/src/xattr.c
File diff suppressed because it is too large
@@ -1,25 +1,39 @@
|
||||
#ifndef _SCOUTFS_XATTR_H_
|
||||
#define _SCOUTFS_XATTR_H_
|
||||
|
||||
ssize_t scoutfs_getxattr(struct dentry *dentry, const char *name, void *buffer,
|
||||
size_t size);
|
||||
int scoutfs_setxattr(struct dentry *dentry, const char *name,
|
||||
const void *value, size_t size, int flags);
|
||||
int scoutfs_removexattr(struct dentry *dentry, const char *name);
|
||||
struct scoutfs_xattr_prefix_tags {
|
||||
unsigned long hide:1,
|
||||
indx:1,
|
||||
srch:1,
|
||||
totl:1;
|
||||
};
|
||||
|
||||
extern const struct xattr_handler *scoutfs_xattr_handlers[];
|
||||
|
||||
int scoutfs_xattr_get_locked(struct inode *inode, const char *name, void *buffer, size_t size,
|
||||
struct scoutfs_lock *lck);
|
||||
int scoutfs_xattr_set_locked(struct inode *inode, const char *name, size_t name_len,
|
||||
const void *value, size_t size, int flags,
|
||||
const struct scoutfs_xattr_prefix_tags *tgs,
|
||||
struct scoutfs_lock *lck, struct scoutfs_lock *totl_lock,
|
||||
struct list_head *ind_locks);
|
||||
|
||||
ssize_t scoutfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
|
||||
ssize_t scoutfs_list_xattrs(struct inode *inode, char *buffer,
|
||||
size_t size, __u32 *hash_pos, __u64 *id_pos,
|
||||
bool e_range, bool show_hidden);
|
||||
|
||||
int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
|
||||
struct scoutfs_lock *lock);
|
||||
|
||||
struct scoutfs_xattr_prefix_tags {
|
||||
unsigned long hide:1,
|
||||
srch:1;
|
||||
};
|
||||
|
||||
int scoutfs_xattr_parse_tags(const char *name, unsigned int name_len,
|
||||
struct scoutfs_xattr_prefix_tags *tgs);
|
||||
|
||||
void scoutfs_xattr_init_totl_key(struct scoutfs_key *key, u64 *name);
|
||||
int scoutfs_xattr_combine_totl(void *dst, int dst_len, void *src, int src_len);
|
||||
|
||||
void scoutfs_xattr_indx_get_range(struct scoutfs_key *start, struct scoutfs_key *end);
|
||||
void scoutfs_xattr_init_indx_key(struct scoutfs_key *key, u8 major, u64 minor, u64 ino, u64 xid);
|
||||
void scoutfs_xattr_get_indx_key(struct scoutfs_key *key, u8 *major, u64 *minor, u64 *ino, u64 *xid);
|
||||
void scoutfs_xattr_set_indx_key_xid(struct scoutfs_key *key, u64 xid);
|
||||
|
||||
#endif
|
||||
|
||||
8
tests/.gitignore
vendored
@@ -1,6 +1,14 @@
|
||||
src/*.d
|
||||
src/createmany
|
||||
src/dumb_renameat2
|
||||
src/dumb_setxattr
|
||||
src/handle_cat
|
||||
src/handle_fsetxattr
|
||||
src/bulk_create_paths
|
||||
src/find_xattrs
|
||||
src/stage_tmpfile
|
||||
src/create_xattr_loop
|
||||
src/o_tmpfile_umask
|
||||
src/o_tmpfile_linkat
|
||||
src/mmap_stress
|
||||
src/mmap_validate
|
||||
|
||||
1
tests/.xfstests-branch
Normal file
@@ -0,0 +1 @@
|
||||
v2022.05.01-2-g787cd20
|
||||
@@ -1,12 +1,21 @@
|
||||
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing
|
||||
CFLAGS := -Wall -O2 -Werror -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -I ../kmod/src
|
||||
SHELL := /usr/bin/bash
|
||||
|
||||
# each binary command is built from a single .c file
|
||||
BIN := src/createmany \
|
||||
src/dumb_renameat2 \
|
||||
src/dumb_setxattr \
|
||||
src/handle_cat \
|
||||
src/handle_fsetxattr \
|
||||
src/bulk_create_paths \
|
||||
src/find_xattrs
|
||||
src/stage_tmpfile \
|
||||
src/find_xattrs \
|
||||
src/create_xattr_loop \
|
||||
src/fragmented_data_extents \
|
||||
src/o_tmpfile_umask \
|
||||
src/o_tmpfile_linkat \
|
||||
src/mmap_stress \
|
||||
src/mmap_validate
|
||||
|
||||
DEPS := $(wildcard src/*.d)
|
||||
|
||||
@@ -16,8 +25,10 @@ ifneq ($(DEPS),)
|
||||
-include $(DEPS)
|
||||
endif
|
||||
|
||||
src/mmap_stress: LIBS+=-lpthread
|
||||
|
||||
$(BIN): %: %.c Makefile
|
||||
gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@
|
||||
gcc $(CFLAGS) -MD -MP -MF $*.d $< -o $@ $(LIBS)
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
|
||||
@@ -25,8 +25,9 @@ All options can be seen by running with -h.
|
||||
This script is built to test multi-node systems on one host by using
|
||||
different mounts of the same devices. The script creates a fake block
|
||||
device in front of each fs block device for each mount that will be
|
||||
tested. Currently it will create free loop devices and will mount on
|
||||
/mnt/test.[0-9].
|
||||
tested. It will create predictable device mapper devices and mount
|
||||
them on /mnt/test.N. These static device names and mount paths limit
|
||||
the script to a single execution per host.
|
||||
|
||||
All tests will be run by default. Particular tests can be included or
|
||||
excluded by providing test name regular expressions with the -I and -E
|
||||
@@ -104,14 +105,15 @@ used during the test.
|
||||
|
||||
| Variable | Description | Origin | Example |
|
||||
| ---------------- | ------------------- | --------------- | ----------------- |
|
||||
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/loop0 |
|
||||
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/loop1 |
|
||||
| T\_MB[0-9] | per-mount meta bdev | created per run | /dev/mapper/\_scoutfs\_test\_meta\_[0-9] |
|
||||
| T\_DB[0-9] | per-mount data bdev | created per run | /dev/mapper/\_scoutfs\_test\_data\_[0-9] |
|
||||
| T\_D[0-9] | per-mount test dir | made for test | /mnt/test.[0-9]/t |
|
||||
| T\_META\_DEVICE | main FS meta bdev | -M | /dev/vda |
|
||||
| T\_DATA\_DEVICE | main FS data bdev | -D | /dev/vdb |
|
||||
| T\_EX\_META\_DEV | scratch meta bdev | -f | /dev/vdd |
|
||||
| T\_EX\_DATA\_DEV | scratch data bdev | -e | /dev/vdc |
|
||||
| T\_M[0-9] | mount paths | mounted per run | /mnt/test.[0-9]/ |
|
||||
| T\_MODULE | built kernel module | created per run | ../kmod/src/..ko |
|
||||
| T\_NR\_MOUNTS | number of mounts | -n | 3 |
|
||||
| T\_O[0-9] | mount options | created per run | -o server\_addr= |
|
||||
| T\_QUORUM | quorum count | -q | 2 |
|
||||
|
||||
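Pulling the flags from the table together, a multi-mount run might be invoked roughly as below; the script name is a placeholder since the harness itself isn't shown in this hunk, and only the flags documented above are assumed:

# hypothetical invocation of the test harness using the documented flags
./run_tests.sh -M /dev/vda -D /dev/vdb -f /dev/vdd -e /dev/vdc \
	-n 3 -q 2 -I 'basic-.*' -E 'change-devices'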
43
tests/fenced-local-force-unmount.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
#
|
||||
# This fencing script is used for testing clusters of multiple mounts on
|
||||
# a single host. It finds mounts to fence by looking for their rids and
|
||||
# only knows how to "fence" by using forced unmount.
|
||||
#
|
||||
|
||||
echo "$0 running rid '$SCOUTFS_FENCED_REQ_RID' ip '$SCOUTFS_FENCED_REQ_IP' args '$@'"
|
||||
|
||||
log() {
|
||||
echo "$@" > /dev/stderr
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo_fail() {
|
||||
echo "$@" > /dev/stderr
|
||||
exit 1
|
||||
}
|
||||
|
||||
rid="$SCOUTFS_FENCED_REQ_RID"
|
||||
|
||||
for fs in /sys/fs/scoutfs/*; do
|
||||
[ ! -d "$fs" ] && continue
|
||||
|
||||
fs_rid="$(cat $fs/rid)" || \
|
||||
echo_fail "failed to get rid in $fs"
|
||||
if [ "$fs_rid" != "$rid" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
nr="$(cat $fs/data_device_maj_min)" || \
|
||||
echo_fail "failed to get data device major:minor in $fs"
|
||||
|
||||
mnts=$(findmnt -l -n -t scoutfs -o TARGET -S $nr) || \
|
||||
echo_fail "findmnt -t scoutfs -S $nr failed"
|
||||
for mnt in $mnts; do
|
||||
umount -f "$mnt" || \
|
||||
echo_fail "umout -f $mnt failed"
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
|
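The script is driven entirely by the environment that the fencing daemon sets up; a hedged sketch of running it by hand, with a made-up rid value purely for illustration:

# illustrative manual run; the fencing daemon normally exports these variables
export SCOUTFS_FENCED_REQ_RID=deadbeef00000001
export SCOUTFS_FENCED_REQ_IP=127.0.0.1
./tests/fenced-local-force-unmount.sh && echo "rid fenced via forced unmount"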
||||
@@ -7,8 +7,9 @@ t_status_msg()
|
||||
export T_PASS_STATUS=100
|
||||
export T_SKIP_STATUS=101
|
||||
export T_FAIL_STATUS=102
|
||||
export T_SKIP_PERMITTED_STATUS=103
|
||||
export T_FIRST_STATUS="$T_PASS_STATUS"
|
||||
export T_LAST_STATUS="$T_FAIL_STATUS"
|
||||
export T_LAST_STATUS="$T_SKIP_PERMITTED_STATUS"
|
||||
|
||||
t_pass()
|
||||
{
|
||||
@@ -21,6 +22,17 @@ t_skip()
|
||||
exit $T_SKIP_STATUS
|
||||
}
|
||||
|
||||
#
|
||||
# This exit code is *reserved* for tests that are up-front never going to work
|
||||
# in certain cases. This should be expressly documented per-case and made
|
||||
# abundantly clear before merging. The test itself should document its case.
|
||||
#
|
||||
t_skip_permitted()
|
||||
{
|
||||
t_status_msg "$@"
|
||||
exit $T_SKIP_PERMITTED_STATUS
|
||||
}
|
||||
|
||||
t_fail()
|
||||
{
|
||||
t_status_msg "$@"
|
||||
@@ -35,10 +47,22 @@ t_fail()
|
||||
t_quiet()
|
||||
{
|
||||
echo "# $*" >> "$T_TMPDIR/quiet.log"
|
||||
"$@" > "$T_TMPDIR/quiet.log" 2>&1 || \
|
||||
"$@" >> "$T_TMPDIR/quiet.log" 2>&1 || \
|
||||
t_fail "quiet command failed"
|
||||
}
|
||||
|
||||
#
|
||||
# Quietly run a command during a test. The output is logged but only
|
||||
# the return code is printed, presumably because the output contains
|
||||
# a lot of invocation specific text that is difficult to filter.
|
||||
#
|
||||
t_rc()
|
||||
{
|
||||
echo "# $*" >> "$T_TMP.rc.log"
|
||||
"$@" >> "$T_TMP.rc.log" 2>&1
|
||||
echo "rc: $?"
|
||||
}
|
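t_rc exists for commands whose full output is too invocation-specific to compare; only the "rc: N" line lands in the compared output. A tiny sketch:

# output goes to $T_TMP.rc.log, the compared output only gets the stable line
t_rc grep -q server_addr /proc/mounts
# which emits, for example:
# rc: 0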
||||
|
||||
#
|
||||
# redirect test output back to the output of the invoking script instead
|
||||
# of the compared output.
|
||||
@@ -56,3 +80,15 @@ t_compare_output()
|
||||
{
|
||||
"$@" >&7 2>&1
|
||||
}
|
||||
|
||||
#
|
||||
# usually bash prints an annoying output message when jobs
|
||||
# are killed. We can avoid that by redirecting stderr for
|
||||
# the bash process when it reaps the jobs that are killed.
|
||||
#
|
||||
t_silent_kill() {
|
||||
exec {ERR}>&2 2>/dev/null
|
||||
kill "$@"
|
||||
wait "$@"
|
||||
exec 2>&$ERR {ERR}>&-
|
||||
}
|
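A sketch of the intended use, assuming a background writer started by a test; the path comes from the per-mount test dir variables documented earlier:

# start background churn, then reap it without bash's job-kill chatter
dd if=/dev/zero of="$T_D0/churn" bs=1M count=100000 &
pid=$!
sleep 5
t_silent_kill $pid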
||||
|
||||
@@ -6,6 +6,61 @@ t_filter_fs()
|
||||
-e 's@Device: [a-fA-F0-9]*h/[0-9]*d@Device: 0h/0d@g'
|
||||
}
|
||||
|
||||
#
|
||||
# We can hit a spurious kasan warning that was fixed upstream:
|
||||
#
|
||||
# e504e74cc3a2 x86/unwind/orc: Disable KASAN checking in the ORC unwinder, part 2
|
||||
#
|
||||
# KASAN can get mad when the unwinder doesn't find ORC metadata and
|
||||
# wanders up without using frames and hits the KASAN stack red zones.
|
||||
# We can ignore these messages.
|
||||
#
|
||||
# They're bracketed by:
|
||||
# [ 2687.690127] ==================================================================
|
||||
# [ 2687.691366] BUG: KASAN: stack-out-of-bounds in get_reg+0x1bc/0x230
|
||||
# ...
|
||||
# [ 2687.706220] ==================================================================
|
||||
# [ 2687.707284] Disabling lock debugging due to kernel taint
|
||||
#
|
||||
# That final lock debugging message may not be included.
|
||||
#
|
||||
ignore_harmless_unwind_kasan_stack_oob()
|
||||
{
|
||||
awk '
|
||||
BEGIN {
|
||||
in_soob = 0
|
||||
soob_nr = 0
|
||||
}
|
||||
( !in_soob && $0 ~ /==================================================================/ ) {
|
||||
in_soob = 1
|
||||
soob_nr = NR
|
||||
saved = $0
|
||||
}
|
||||
( in_soob == 1 && NR == (soob_nr + 1) ) {
|
||||
if (match($0, /KASAN: stack-out-of-bounds in get_reg/) != 0) {
|
||||
in_soob = 2
|
||||
} else {
|
||||
in_soob = 0
|
||||
print saved
|
||||
}
|
||||
saved=""
|
||||
}
|
||||
( in_soob == 2 && $0 ~ /==================================================================/ ) {
|
||||
in_soob = 3
|
||||
soob_nr = NR
|
||||
}
|
||||
( in_soob == 3 && NR > soob_nr && $0 !~ /Disabling lock debugging/ ) {
|
||||
in_soob = 0
|
||||
}
|
||||
( !in_soob ) { print $0 }
|
||||
END {
|
||||
if (saved) {
|
||||
print saved
|
||||
}
|
||||
}
|
||||
'
|
||||
}
|
||||
|
||||
#
|
||||
# Filter out expected messages. Putting messages here implies that
|
||||
# tests aren't relying on messages to discover failures.. they're
|
||||
@@ -18,6 +73,7 @@ t_filter_dmesg()
|
||||
|
||||
# the kernel can just be noisy
|
||||
re=" used greatest stack depth: "
|
||||
re="$re|sched: RT throttling activated"
|
||||
|
||||
# mkfs/mount checks partition tables
|
||||
re="$re|unknown partition table"
|
||||
@@ -40,7 +96,7 @@ t_filter_dmesg()
|
||||
# mount and unmount spew a bunch
|
||||
re="$re|scoutfs.*client connected"
|
||||
re="$re|scoutfs.*client disconnected"
|
||||
re="$re|scoutfs.*server setting up"
|
||||
re="$re|scoutfs.*server starting"
|
||||
re="$re|scoutfs.*server ready"
|
||||
re="$re|scoutfs.*server accepted"
|
||||
re="$re|scoutfs.*server closing"
|
||||
@@ -52,15 +108,61 @@ t_filter_dmesg()
|
||||
|
||||
# tests that drop unmount io triggers fencing
|
||||
re="$re|scoutfs .* error: fencing "
|
||||
re="$re|scoutfs .*: waiting for .* lock clients"
|
||||
re="$re|scoutfs .*: all lock clients recovered"
|
||||
re="$re|scoutfs .*: waiting for .* clients"
|
||||
re="$re|scoutfs .*: all clients recovered"
|
||||
re="$re|scoutfs .* error: client rid.*lock recovery timed out"
|
||||
|
||||
# some tests mount w/o options
|
||||
# we test bad devices and options
|
||||
re="$re|scoutfs .* error: Required mount option \"metadev_path\" not found"
|
||||
re="$re|scoutfs .* error: meta_super META flag not set"
|
||||
re="$re|scoutfs .* error: could not open metadev:.*"
|
||||
re="$re|scoutfs .* error: Unknown or malformed option,.*"
|
||||
re="$re|scoutfs .* error: invalid quorum_heartbeat_timeout_ms value"
|
||||
|
||||
# in debugging kernels we can slow things down a bit
|
||||
re="$re|hrtimer: interrupt took .*"
|
||||
|
||||
egrep -v "($re)"
|
||||
# fencing tests force unmounts and trigger timeouts
|
||||
re="$re|scoutfs .* forcing unmount"
|
||||
re="$re|scoutfs .* reconnect timed out"
|
||||
re="$re|scoutfs .* recovery timeout expired"
|
||||
re="$re|scoutfs .* fencing previous leader"
|
||||
re="$re|scoutfs .* reclaimed resources"
|
||||
re="$re|scoutfs .* quorum .* error"
|
||||
re="$re|scoutfs .* error reading quorum block"
|
||||
re="$re|scoutfs .* error .* writing quorum block"
|
||||
re="$re|scoutfs .* error .* while checking to delete inode"
|
||||
re="$re|scoutfs .* error .*writing btree blocks.*"
|
||||
re="$re|scoutfs .* error .*writing super block.*"
|
||||
re="$re|scoutfs .* error .* freeing merged btree blocks.*.looping commit del.*upd freeing item"
|
||||
re="$re|scoutfs .* error .* freeing merged btree blocks.*.final commit del.upd freeing item"
|
||||
re="$re|scoutfs .* error .*reading quorum block.*to update event.*"
|
||||
re="$re|scoutfs .* error.*server failed to bind to.*"
|
||||
re="$re|scoutfs .* critical transaction commit failure.*"
|
||||
|
||||
# change-devices causes loop device resizing
|
||||
re="$re|loop: module loaded"
|
||||
re="$re|loop[0-9].* detected capacity change from.*"
|
||||
re="$re|dm-[0-9].* detected capacity change from.*"
|
||||
|
||||
# ignore systemd-journal rotating
|
||||
re="$re|systemd-journald.*"
|
||||
|
||||
# process accounting can be noisy
|
||||
re="$re|Process accounting resumed.*"
|
||||
|
||||
# format vers back/compat tries bad mounts
|
||||
re="$re|scoutfs .* error.*outside of supported version.*"
|
||||
re="$re|scoutfs .* error.*could not get .*super.*"
|
||||
|
||||
# ignore "unsafe core pattern" when xfstests tries to disable cores"
|
||||
re="$re|Unsafe core_pattern used with fs.suid_dumpable=2.*"
|
||||
re="$re|Pipe handler or fully qualified core dump path required.*"
|
||||
re="$re|Set kernel.core_pattern before fs.suid_dumpable.*"
|
||||
|
||||
# perf warning that it adjusted sample rate
|
||||
re="$re|perf: interrupt took too long.*lowering kernel.perf_event_max_sample_rate.*"
|
||||
|
||||
egrep -v "($re)" | \
|
||||
ignore_harmless_unwind_kasan_stack_oob
|
||||
}
|
||||
|
||||
@@ -17,14 +17,24 @@ t_sync_seq_index()
|
||||
t_quiet sync
|
||||
}
|
||||
|
||||
#
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given mount
|
||||
# number, 0 is used by default if none is specified.
|
||||
#
|
||||
t_ident()
|
||||
t_mount_rid()
|
||||
{
|
||||
local nr="${1:-0}"
|
||||
local mnt="$(eval echo \$T_M$nr)"
|
||||
local rid
|
||||
|
||||
rid=$(scoutfs statfs -s rid -p "$mnt")
|
||||
|
||||
echo "$rid"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given path
|
||||
# in a mounted scoutfs volume.
|
||||
#
|
||||
t_ident_from_mnt()
|
||||
{
|
||||
local mnt="$1"
|
||||
local fsid
|
||||
local rid
|
||||
|
||||
@@ -34,6 +44,38 @@ t_ident()
|
||||
echo "f.${fsid:0:6}.r.${rid:0:6}"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the "f.$fsid.r.$rid" identifier string for the given mount
|
||||
# number, 0 is used by default if none is specified.
|
||||
#
|
||||
t_ident()
|
||||
{
|
||||
local nr="${1:-0}"
|
||||
local mnt="$(eval echo \$T_M$nr)"
|
||||
|
||||
t_ident_from_mnt "$mnt"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the sysfs path for a mount identified by its "f.$fsid.r.$rid" ident.
|
||||
#
|
||||
t_sysfs_path_from_ident()
|
||||
{
|
||||
local ident="$1"
|
||||
|
||||
echo "/sys/fs/scoutfs/$ident"
|
||||
}
|
||||
|
||||
#
|
||||
# Output the sysfs path for a path in a mounted fs.
|
||||
#
|
||||
t_sysfs_path_from_mnt()
|
||||
{
|
||||
local mnt="$1"
|
||||
|
||||
t_sysfs_path_from_ident $(t_ident_from_mnt $mnt)
|
||||
}
|
||||
|
||||
#
|
||||
# Output the mount's sysfs path, defaulting to mount 0 if none is
|
||||
# specified.
|
||||
@@ -42,7 +84,7 @@ t_sysfs_path()
|
||||
{
|
||||
local nr="$1"
|
||||
|
||||
echo "/sys/fs/scoutfs/$(t_ident $nr)"
|
||||
t_sysfs_path_from_ident $(t_ident $nr)
|
||||
}
|
||||
|
||||
#
|
||||
@@ -64,6 +106,29 @@ t_fs_nrs()
|
||||
seq 0 $((T_NR_MOUNTS - 1))
|
||||
}
|
||||
|
||||
#
|
||||
# output the fs nrs of quorum nodes, we "know" that
|
||||
# the quorum nrs are the first consequtive nrs
|
||||
#
|
||||
t_quorum_nrs()
|
||||
{
|
||||
seq 0 $((T_QUORUM - 1))
|
||||
}
|
||||
|
||||
#
|
||||
# outputs "1" if the fs number has "1" in its quorum/is_leader file.
|
||||
# All other cases output 0, including the fs nr being a client which
|
||||
# won't have a quorum/ dir.
|
||||
#
|
||||
t_fs_is_leader()
|
||||
{
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader 2>/dev/null)" == "1" ]; then
|
||||
echo "1"
|
||||
else
|
||||
echo "0"
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Output the mount nr of the current server. This takes no steps to
|
||||
# ensure that the server doesn't shut down and have some other mount
|
||||
@@ -72,7 +137,7 @@ t_fs_nrs()
|
||||
t_server_nr()
|
||||
{
|
||||
for i in $(t_fs_nrs); do
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "1" ]; then
|
||||
if [ "$(t_fs_is_leader $i)" == "1" ]; then
|
||||
echo $i
|
||||
return
|
||||
fi
|
||||
@@ -90,7 +155,7 @@ t_server_nr()
|
||||
t_first_client_nr()
|
||||
{
|
||||
for i in $(t_fs_nrs); do
|
||||
if [ "$(cat $(t_sysfs_path $i)/quorum/is_leader)" == "0" ]; then
|
||||
if [ "$(t_fs_is_leader $i)" == "0" ]; then
|
||||
echo $i
|
||||
return
|
||||
fi
|
||||
@@ -119,7 +184,27 @@ t_mount()
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval t_quiet mount -t scoutfs \$T_O$nr \$T_DB$nr \$T_M$nr
|
||||
eval t_quiet mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
|
||||
}
|
||||
|
||||
#
|
||||
# Mount with an optional mount option string. If the string is empty
|
||||
# then the saved mount options are used. If the string has contents
|
||||
# then it is appended to the end of the saved options with a separating
|
||||
# comma.
|
||||
#
|
||||
# Unlike t_mount this won't inherently fail in t_quiet, errors are
|
||||
# returned so bad options can be tested.
|
||||
#
|
||||
t_mount_opt()
|
||||
{
|
||||
local nr="$1"
|
||||
local opt="${2:+,$2}"
|
||||
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval mount -t scoutfs \$T_O$nr\$opt \$T_DB$nr \$T_M$nr
|
||||
}
|
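Because t_mount_opt returns mount's status instead of failing inside t_quiet, it suits negative option tests; a short sketch using an option name that already appears in the dmesg filters above:

# the rejected mount is the point of the test, so check the return code directly
if t_mount_opt 0 "quorum_heartbeat_timeout_ms=bogus" 2>/dev/null; then
	t_fail "mount accepted an invalid quorum_heartbeat_timeout_ms"
fi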
||||
|
||||
t_umount()
|
||||
@@ -129,7 +214,17 @@ t_umount()
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval t_quiet umount \$T_M$i
|
||||
eval t_quiet umount \$T_M$nr
|
||||
}
|
||||
|
||||
t_force_umount()
|
||||
{
|
||||
local nr="$1"
|
||||
|
||||
test "$nr" -lt "$T_NR_MOUNTS" || \
|
||||
t_fail "fs nr $nr invalid"
|
||||
|
||||
eval t_quiet umount -f \$T_M$nr
|
||||
}
|
||||
|
||||
#
|
||||
@@ -201,6 +296,15 @@ t_trigger_get() {
|
||||
cat "$(t_trigger_path "$nr")/$which"
|
||||
}
|
||||
|
||||
t_trigger_set() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
local val="$3"
|
||||
local path=$(t_trigger_path "$nr")
|
||||
|
||||
echo "$val" > "$path/$which"
|
||||
}
|
||||
|
||||
t_trigger_show() {
|
||||
local which="$1"
|
||||
local string="$2"
|
||||
@@ -212,9 +316,8 @@ t_trigger_show() {
|
||||
t_trigger_arm_silent() {
|
||||
local which="$1"
|
||||
local nr="$2"
|
||||
local path=$(t_trigger_path "$nr")
|
||||
|
||||
echo 1 > "$path/$which"
|
||||
t_trigger_set "$which" "$nr" 1
|
||||
}
|
||||
|
||||
t_trigger_arm() {
|
||||
@@ -277,3 +380,121 @@ t_counter_diff_changed() {
|
||||
echo "counter $which didn't change" ||
|
||||
echo "counter $which changed"
|
||||
}
|
||||
|
||||
#
|
||||
# See if we can find a local mount with the caller's rid.
|
||||
#
|
||||
t_rid_is_mounted() {
|
||||
local rid="$1"
|
||||
local fr
|
||||
|
||||
for fr in /sys/fs/scoutfs/*; do
|
||||
if [ "$(cat $fr/rid)" == "$rid" ]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
#
|
||||
# A given mount is being fenced if any mount has a fence request pending
|
||||
# for it which hasn't finished and been removed.
|
||||
#
|
||||
t_rid_is_fencing() {
|
||||
local rid="$1"
|
||||
local fr
|
||||
|
||||
for fr in /sys/fs/scoutfs/*; do
|
||||
if [ -d "$fr/fence/$rid" ]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
#
|
||||
# Wait until the mount identified by the first rid arg is not in any
|
||||
# states specified by the remaining state description word args.
|
||||
#
|
||||
t_wait_if_rid_is() {
|
||||
local rid="$1"
|
||||
|
||||
while ( [[ $* =~ mounted ]] && t_rid_is_mounted $rid ) ||
|
||||
( [[ $* =~ fencing ]] && t_rid_is_fencing $rid ) ; do
|
||||
sleep .5
|
||||
done
|
||||
}
|
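These rid helpers compose into the usual fencing-test flow; a hedged sketch, with an arbitrary mount number:

# force-unmount a mount, wait until it is neither mounted nor being fenced,
# then bring it back
rid=$(t_mount_rid 2)
t_force_umount 2
t_wait_if_rid_is "$rid" mounted fencing
t_mount 2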
||||
|
||||
#
|
||||
# Wait until any mount identifies itself as the elected leader. We can
|
||||
# be waiting while tests mount and unmount so mounts may not be mounted
|
||||
# at the test's expected mount points.
|
||||
#
|
||||
t_wait_for_leader() {
|
||||
local i
|
||||
|
||||
while sleep .25; do
|
||||
for i in $(t_fs_nrs); do
|
||||
local ldr="$(t_sysfs_path $i 2>/dev/null)/quorum/is_leader"
|
||||
if [ "$(cat $ldr 2>/dev/null)" == "1" ]; then
|
||||
return
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
t_get_sysfs_mount_option() {
|
||||
local nr="$1"
|
||||
local name="$2"
|
||||
local opt="$(t_sysfs_path $nr)/mount_options/$name"
|
||||
|
||||
cat "$opt"
|
||||
}
|
||||
|
||||
t_set_sysfs_mount_option() {
|
||||
local nr="$1"
|
||||
local name="$2"
|
||||
local val="$3"
|
||||
local opt="$(t_sysfs_path $nr)/mount_options/$name"
|
||||
|
||||
echo "$val" > "$opt" 2>/dev/null
|
||||
}
|
||||
|
||||
t_set_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local val="$2"
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
t_set_sysfs_mount_option $i $name $val
|
||||
done
|
||||
}
|
||||
|
||||
declare -A _saved_opts
|
||||
t_save_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local ind
|
||||
local opt
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
opt="$(t_sysfs_path $i)/mount_options/$name"
|
||||
ind="${name}_${i}"
|
||||
|
||||
_saved_opts[$ind]="$(cat $opt)"
|
||||
done
|
||||
}
|
||||
|
||||
t_restore_all_sysfs_mount_options() {
|
||||
local name="$1"
|
||||
local ind
|
||||
local i
|
||||
|
||||
for i in $(t_fs_nrs); do
|
||||
ind="${name}_${i}"
|
||||
|
||||
t_set_sysfs_mount_option $i $name "${_saved_opts[$ind]}"
|
||||
done
|
||||
}
|
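A sketch of the save/override/restore pattern these helpers enable; the option name is hypothetical since this hunk doesn't name any mount_options entries:

# temporarily override an option on every mount, then put the originals back
t_save_all_sysfs_mount_options orphan_scan_delay_ms	# hypothetical option name
t_set_all_sysfs_mount_options orphan_scan_delay_ms 100
# ... the racy part of the test runs here ...
t_restore_all_sysfs_mount_options orphan_scan_delay_ms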
||||
|
||||
88
tests/funcs/tap.sh
Normal file
@@ -0,0 +1,88 @@
|
||||
|
||||
#
|
||||
# Generate TAP format test results
|
||||
#
|
||||
|
||||
t_tap_header()
|
||||
{
|
||||
local runid=$1
|
||||
local sequence=( $(echo $tests) )
|
||||
local count=${#sequence[@]}
|
||||
|
||||
# avoid recreating the same TAP result over again - harness sets this
|
||||
[[ -z "$runid" ]] && runid="*test*"
|
||||
|
||||
cat > $T_RESULTS/scoutfs.tap <<TAPEOF
|
||||
TAP version 14
|
||||
1..${count}
|
||||
#
|
||||
# TAP results for run ${runid}
|
||||
#
|
||||
# host/run info:
|
||||
#
|
||||
# hostname: ${HOSTNAME}
|
||||
# test start time: $(date --utc)
|
||||
# uname -r: $(uname -r)
|
||||
# scoutfs commit id: $(git describe --tags)
|
||||
#
|
||||
# sequence for this run:
|
||||
#
|
||||
TAPEOF
|
||||
|
||||
# Sequence
|
||||
for t in ${tests}; do
|
||||
echo ${t/.sh/}
|
||||
done | cat -n | expand | column -c 120 | expand | sed 's/^ /#/' >> $T_RESULTS/scoutfs.tap
|
||||
echo "#" >> $T_RESULTS/scoutfs.tap
|
||||
}
|
||||
|
||||
t_tap_progress()
|
||||
{
|
||||
(
|
||||
local i=$(( testcount + 1 ))
|
||||
local testname=$1
|
||||
local result=$2
|
||||
|
||||
local diff=""
|
||||
local dmsg=""
|
||||
|
||||
if [[ -s "$T_RESULTS/tmp/${testname}/dmesg.new" ]]; then
|
||||
dmsg="1"
|
||||
fi
|
||||
|
||||
if ! cmp -s golden/${testname} $T_RESULTS/output/${testname}; then
|
||||
diff="1"
|
||||
fi
|
||||
|
||||
if [[ "${result}" == "100" ]] && [[ -z "${dmsg}" ]] && [[ -z "${diff}" ]]; then
|
||||
echo "ok ${i} - ${testname}"
|
||||
elif [[ "${result}" == "103" ]]; then
|
||||
echo "ok ${i} - ${testname}"
|
||||
echo "# ${testname} ** skipped - permitted **"
|
||||
else
|
||||
echo "not ok ${i} - ${testname}"
|
||||
case ${result} in
|
||||
101)
|
||||
echo "# ${testname} ** skipped **"
|
||||
;;
|
||||
102)
|
||||
echo "# ${testname} ** failed **"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "#"
|
||||
echo "# diff:"
|
||||
echo "#"
|
||||
diff -u golden/${testname} $T_RESULTS/output/${testname} | expand | sed 's/^/# /'
|
||||
fi
|
||||
|
||||
if [[ -n "${dmsg}" ]]; then
|
||||
echo "#"
|
||||
echo "# dmesg:"
|
||||
echo "#"
|
||||
cat "$T_RESULTS/tmp/${testname}/dmesg.new" | sed 's/^/# /'
|
||||
fi
|
||||
fi
|
||||
) >> $T_RESULTS/scoutfs.tap
|
||||
}
|
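Taken together, the header and progress helpers append results along these lines to $T_RESULTS/scoutfs.tap; test names are borrowed from the golden files below and the run details are illustrative:

TAP version 14
1..3
# TAP results for run 2021-06-01-1
ok 1 - basic-bad-mounts
ok 2 - basic-truncate
not ok 3 - change-devices
# change-devices ** failed **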
||||
6
tests/golden/basic-bad-mounts
Normal file
@@ -0,0 +1,6 @@
|
||||
== prepare devices, mount point, and logs
|
||||
== bad devices, bad options
|
||||
== swapped devices
|
||||
== both meta devices
|
||||
== both data devices
|
||||
== good volume, bad option and good options
|
||||
155
tests/golden/basic-posix-acl
Normal file
@@ -0,0 +1,155 @@
|
||||
== setup test directory
|
||||
== getfacl
|
||||
directory drwxr-xr-x 0 0 0 '.'
|
||||
# file: .
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
group::r-x
|
||||
other::r-x
|
||||
|
||||
== basic non-acl access through permissions
|
||||
directory drwxr-xr-x 0 44444 0 'dir-testuid'
|
||||
touch: cannot touch 'dir-testuid/file-group-write': Permission denied
|
||||
touch: cannot touch 'symlinkdir-testuid/symlink-file-group-write': Permission denied
|
||||
regular empty file -rw-r--r-- 22222 44444 0 'dir-testuid/file-group-write'
|
||||
regular empty file -rw-r--r-- 22222 44444 0 'symlinkdir-testuid/symlink-file-group-write'
|
||||
== basic acl access
|
||||
directory drwxr-xr-x 0 0 0 'dir-root'
|
||||
touch: cannot touch 'dir-root/file-group-write': Permission denied
|
||||
touch: cannot touch 'symlinkdir-root/file-group-write': Permission denied
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
regular empty file -rw-r--r-- 22222 0 0 'dir-root/file-group-write'
|
||||
regular empty file -rw-r--r-- 22222 0 0 'symlinkdir-root/file-group-write'
|
||||
== directory exec
|
||||
Success
|
||||
Success
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rw-
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
Failed
|
||||
Failed
|
||||
# file: dir-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rw-
|
||||
group::r-x
|
||||
group:44444:rwx
|
||||
mask::rwx
|
||||
other::r-x
|
||||
|
||||
Success
|
||||
Success
|
||||
== get/set attr
|
||||
regular empty file -rw-r--r-- 0 0 0 'file-root'
|
||||
setfattr: file-root: Permission denied
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
user:22222:rw-
|
||||
group::r--
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
group::r--
|
||||
mask::r--
|
||||
other::r--
|
||||
|
||||
setfattr: file-root: Permission denied
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
|
||||
# file: file-root
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
group::r--
|
||||
group:44444:rw-
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
# file: file-root
|
||||
user.test2="Success"
|
||||
user.test4="Success"
|
||||
|
||||
== inheritance / default acl
|
||||
directory drwxr-xr-x 0 0 0 'dir-root2'
|
||||
mkdir: cannot create directory 'dir-root2/dir': Permission denied
|
||||
touch: cannot touch 'dir-root2/dir/file': No such file or directory
|
||||
# file: dir-root2
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
group::r-x
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
mkdir: cannot create directory 'dir-root2/dir': Permission denied
|
||||
touch: cannot touch 'dir-root2/dir/file': No such file or directory
|
||||
# file: dir-root2
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
directory drwxrwxr-x 22222 0 4 'dir-root2/dir'
|
||||
# file: dir-root2/dir
|
||||
# owner: 22222
|
||||
# group: root
|
||||
user::rwx
|
||||
user:22222:rwx
|
||||
group::r-x
|
||||
mask::rwx
|
||||
other::r-x
|
||||
default:user::rwx
|
||||
default:user:22222:rwx
|
||||
default:group::r-x
|
||||
default:mask::rwx
|
||||
default:other::r-x
|
||||
|
||||
regular empty file -rw-rw-r-- 22222 0 0 'dir-root2/dir/file'
|
||||
# file: dir-root2/dir/file
|
||||
# owner: 22222
|
||||
# group: root
|
||||
user::rw-
|
||||
user:22222:rwx #effective:rw-
|
||||
group::r-x #effective:r--
|
||||
mask::rw-
|
||||
other::r--
|
||||
|
||||
== cleanup
|
||||
@@ -47,9 +47,13 @@ four
|
||||
--- dir within dir
|
||||
--- overwrite file
|
||||
--- can't overwrite non-empty dir
|
||||
mv: cannot move ‘/mnt/test/test/basic-posix-consistency/dir/c/clobber’ to ‘/mnt/test/test/basic-posix-consistency/dir/a/dir’: Directory not empty
|
||||
mv: cannot move '/mnt/test/test/basic-posix-consistency/dir/c/clobber' to '/mnt/test/test/basic-posix-consistency/dir/a/dir': Directory not empty
|
||||
--- can overwrite empty dir
|
||||
--- can rename into root
|
||||
== path resoluion
|
||||
== inode indexes match after syncing existing
|
||||
== inode indexes match after copying and syncing
|
||||
== inode indexes match after removing and syncing
|
||||
== concurrent creates make one file
|
||||
one-file
|
||||
== cleanup
|
||||
|
||||
6
tests/golden/basic-truncate
Normal file
@@ -0,0 +1,6 @@
|
||||
== truncate writes zeroed partial end of file block
|
||||
0000000 0a79 0a79 0a79 0a79 0a79 0a79 0a79 0a79
|
||||
*
|
||||
0006144 0000 0000 0000 0000 0000 0000 0000 0000
|
||||
*
|
||||
0012288
|
||||
@@ -1,52 +1,2 @@
|
||||
== create shared test file
|
||||
== set and get xattrs between mount pairs while retrying
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="1"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="2"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="3"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="4"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="5"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="6"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="7"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="8"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="9"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
counter block_cache_remove_stale changed
|
||||
# file: /mnt/test/test/block-stale-reads/file
|
||||
user.xat="10"
|
||||
|
||||
counter block_cache_remove_stale changed
|
||||
== Issue scoutfs df to force block reads to trigger stale invalidation/retry
|
||||
counter block_cache_remove_stale changed
|
||||
|
||||
28
tests/golden/change-devices
Normal file
@@ -0,0 +1,28 @@
|
||||
== make tmp sparse data dev files
|
||||
== make scratch fs
|
||||
== small new data device fails
|
||||
rc: 1
|
||||
== check sees data device errors
|
||||
rc: 1
|
||||
rc: 0
|
||||
== preparing while mounted fails
|
||||
rc: 1
|
||||
== preparing without recovery fails
|
||||
rc: 1
|
||||
== check sees metadata errors
|
||||
rc: 1
|
||||
rc: 1
|
||||
== preparing with file data fails
|
||||
rc: 1
|
||||
== preparing after emptied
|
||||
rc: 0
|
||||
== checks pass
|
||||
rc: 0
|
||||
rc: 0
|
||||
== using prepared
|
||||
== preparing larger and resizing
|
||||
rc: 0
|
||||
equal_prepared
|
||||
large_prepared
|
||||
resized larger test rc: 0
|
||||
== cleanup
|
||||
1
tests/golden/client-unmount-recovery
Normal file
@@ -0,0 +1 @@
|
||||
== 60s of unmounting non-quorum clients during recovery
|
||||
Some files were not shown because too many files have changed in this diff