Mirror of https://github.com/scylladb/scylladb.git, synced 2026-04-23 01:50:35 +00:00
Compare commits: branch-0.1...branch-0.1 (484 commits)
@@ -1,6 +1,6 @@
 #!/bin/sh

-VERSION=0.10
+VERSION=0.11.1

 if test -f version
 then
@@ -26,6 +26,25 @@
         }
       ]
     },
+    {
+      "path":"/compaction_manager/compaction_history",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"get List of the compaction history",
+          "type":"array",
+          "items":{
+            "type":"history"
+          },
+          "nickname":"get_compaction_history",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
     {
       "path":"/compaction_manager/compaction_summary",
       "operations":[
@@ -181,6 +200,43 @@
           "description":"A list of key, value mapping"
         }
       }
-    }
+    },
+    "history": {
+      "id":"history",
+      "description":"Compaction history information",
+      "properties":{
+        "id":{
+          "type":"string",
+          "description":"The UUID"
+        },
+        "cf":{
+          "type":"string",
+          "description":"The column family name"
+        },
+        "ks":{
+          "type":"string",
+          "description":"The keyspace name"
+        },
+        "compacted_at":{
+          "type":"long",
+          "description":"The time of compaction"
+        },
+        "bytes_in":{
+          "type":"long",
+          "description":"Bytes in"
+        },
+        "bytes_out":{
+          "type":"long",
+          "description":"Bytes out"
+        },
+        "rows_merged":{
+          "type":"array",
+          "items":{
+            "type":"mapper"
+          },
+          "description":"The merged rows"
+        }
+      }
+    }
   }
 }
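A minimal usage sketch for the endpoint added above, in the spirit of the repository's shell scripts (the localhost:10000 address is an assumption, not part of this diff):

    # Fetch the compaction history; the server answers with a JSON array of "history" objects
    curl -s "http://localhost:10000/compaction_manager/compaction_history"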
@@ -193,8 +193,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get the RPC timeout",
-          "type":"long",
+          "summary":"Get the RPC timeout in seconds",
+          "type":"double",
           "nickname":"get_rpc_timeout",
           "produces":[
             "application/json"
@@ -214,10 +214,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"Timeout in millis",
+          "description":"Timeout in seconds",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -229,8 +229,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get the read RPC timeout",
-          "type":"long",
+          "summary":"Get the read RPC timeout in seconds",
+          "type":"double",
           "nickname":"get_read_rpc_timeout",
           "produces":[
             "application/json"
@@ -250,10 +250,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout_in_millis",
+          "description":"The timeout in second",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -265,8 +265,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get the write RPC timeout",
-          "type":"long",
+          "summary":"Get the write RPC timeout in seconds",
+          "type":"double",
           "nickname":"get_write_rpc_timeout",
           "produces":[
             "application/json"
@@ -286,10 +286,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout in millisecond",
+          "description":"timeout in seconds",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -301,8 +301,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get counter write rpc timeout",
-          "type":"long",
+          "summary":"Get counter write rpc timeout in seconds",
+          "type":"double",
           "nickname":"get_counter_write_rpc_timeout",
           "produces":[
             "application/json"
@@ -322,10 +322,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout in millisecond",
+          "description":"timeout in seconds",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -337,8 +337,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get CAS contention timeout",
-          "type":"long",
+          "summary":"Get CAS contention timeout in seconds",
+          "type":"double",
           "nickname":"get_cas_contention_timeout",
           "produces":[
             "application/json"
@@ -358,10 +358,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout in millisecond",
+          "description":"timeout in second",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -373,8 +373,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get range rpc timeout",
-          "type":"long",
+          "summary":"Get range rpc timeout in seconds",
+          "type":"double",
           "nickname":"get_range_rpc_timeout",
           "produces":[
             "application/json"
@@ -394,10 +394,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout in millisecond",
+          "description":"timeout in second",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
@@ -409,8 +409,8 @@
       "operations":[
         {
           "method":"GET",
-          "summary":"Get truncate rpc timeout",
-          "type":"long",
+          "summary":"Get truncate rpc timeout in seconds",
+          "type":"double",
           "nickname":"get_truncate_rpc_timeout",
           "produces":[
             "application/json"
@@ -430,10 +430,10 @@
       "parameters":[
         {
           "name":"timeout",
-          "description":"timeout in millisecond",
+          "description":"timeout in second",
           "required":true,
           "allowMultiple":false,
-          "type":"long",
+          "type":"double",
           "paramType":"query"
         }
       ]
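A hedged sketch of exercising the reworked timeout API (the /storage_proxy/rpc_timeout path is inferred from the get_rpc_timeout nickname, and localhost:10000 is an assumption):

    # Read the RPC timeout; after this change it is a double, in seconds
    curl -s "http://localhost:10000/storage_proxy/rpc_timeout"
    # Set it to 2.5 seconds via the "timeout" query parameter
    curl -s -X POST "http://localhost:10000/storage_proxy/rpc_timeout?timeout=2.5"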
@@ -717,7 +717,7 @@
       ]
     },
     {
-      "path": "/storage_proxy/metrics/read/latency/histogram",
+      "path": "/storage_proxy/metrics/read/histogram",
       "operations": [
         {
           "method": "GET",
@@ -732,7 +732,7 @@
       ]
     },
     {
-      "path": "/storage_proxy/metrics/range/latency/histogram",
+      "path": "/storage_proxy/metrics/range/histogram",
       "operations": [
         {
           "method": "GET",
@@ -807,7 +807,7 @@
       ]
     },
     {
-      "path": "/storage_proxy/metrics/write/latency/histogram",
+      "path": "/storage_proxy/metrics/write/histogram",
       "operations": [
         {
           "method": "GET",
@@ -820,7 +820,103 @@
           "parameters": []
         }
       ]
-    }
+    },
+    {
+      "path":"/storage_proxy/metrics/read/estimated_histogram/",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get read estimated latency",
+          "$ref":"#/utils/estimated_histogram",
+          "nickname":"get_read_estimated_histogram",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
+    {
+      "path":"/storage_proxy/metrics/read",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get read latency",
+          "type":"int",
+          "nickname":"get_read_latency",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
+    {
+      "path":"/storage_proxy/metrics/write/estimated_histogram/",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get write estimated latency",
+          "$ref":"#/utils/estimated_histogram",
+          "nickname":"get_write_estimated_histogram",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
+    {
+      "path":"/storage_proxy/metrics/write",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get write latency",
+          "type":"int",
+          "nickname":"get_write_latency",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
+    {
+      "path":"/storage_proxy/metrics/range/estimated_histogram/",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get range estimated latency",
+          "$ref":"#/utils/estimated_histogram",
+          "nickname":"get_range_estimated_histogram",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    },
+    {
+      "path":"/storage_proxy/metrics/range",
+      "operations":[
+        {
+          "method":"GET",
+          "summary":"Get range latency",
+          "type":"int",
+          "nickname":"get_range_latency",
+          "produces":[
+            "application/json"
+          ],
+          "parameters":[
+          ]
+        }
+      ]
+    }
   ],
   "models":{
     "mapper_list":{
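A usage sketch for the new latency endpoints (paths are taken from the JSON above; host and port are an assumption):

    # Total read latency as an integer, and the estimated read-latency histogram
    curl -s "http://localhost:10000/storage_proxy/metrics/read"
    curl -s "http://localhost:10000/storage_proxy/metrics/read/estimated_histogram/"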
@@ -609,7 +609,7 @@
     "path":"/storage_service/keyspace_cleanup/{keyspace}",
     "operations":[
       {
-        "method":"GET",
+        "method":"POST",
         "summary":"Trigger a cleanup of keys on a single keyspace",
         "type":"int",
         "nickname":"force_keyspace_cleanup",
@@ -890,8 +890,8 @@
       ],
       "parameters":[
         {
-          "name":"token",
-          "description":"The token to remove",
+          "name":"host_id",
+          "description":"Remove the node with host_id from the cluster",
           "required":true,
           "allowMultiple":false,
           "type":"string",
@@ -1969,9 +1969,9 @@
     "id":"snapshot",
     "description":"Snapshot detail",
     "properties":{
-      "key":{
+      "ks":{
         "type":"string",
-        "description":"The key snapshot key"
+        "description":"The key space snapshot key"
       },
       "cf":{
         "type":"string",
@@ -1993,7 +1993,7 @@
     "properties":{
       "key":{
         "type":"string",
-        "description":"The keyspace"
+        "description":"The snapshot key"
       },
       "value":{
         "type":"array",
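A sketch of the cleanup call after the method change (the keyspace name "ks1" is hypothetical; host and port are an assumption):

    # keyspace_cleanup is now triggered with POST rather than GET
    curl -s -X POST "http://localhost:10000/storage_service/keyspace_cleanup/ks1"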
@@ -25,6 +25,102 @@
         ]
       }
     ]
   },
+  {
+    "path":"/stream_manager/metrics/outbound",
+    "operations":[
+      {
+        "method":"GET",
+        "summary":"Get number of active outbound streams",
+        "type":"int",
+        "nickname":"get_all_active_streams_outbound",
+        "produces":[
+          "application/json"
+        ],
+        "parameters":[
+        ]
+      }
+    ]
+  },
+  {
+    "path":"/stream_manager/metrics/incoming/{peer}",
+    "operations":[
+      {
+        "method":"GET",
+        "summary":"Get total incoming bytes",
+        "type":"int",
+        "nickname":"get_total_incoming_bytes",
+        "produces":[
+          "application/json"
+        ],
+        "parameters":[
+          {
+            "name":"peer",
+            "description":"The stream peer",
+            "required":true,
+            "allowMultiple":false,
+            "type":"string",
+            "paramType":"path"
+          }
+        ]
+      }
+    ]
+  },
+  {
+    "path":"/stream_manager/metrics/incoming",
+    "operations":[
+      {
+        "method":"GET",
+        "summary":"Get all total incoming bytes",
+        "type":"int",
+        "nickname":"get_all_total_incoming_bytes",
+        "produces":[
+          "application/json"
+        ],
+        "parameters":[
+        ]
+      }
+    ]
+  },
+  {
+    "path":"/stream_manager/metrics/outgoing/{peer}",
+    "operations":[
+      {
+        "method":"GET",
+        "summary":"Get total outgoing bytes",
+        "type":"int",
+        "nickname":"get_total_outgoing_bytes",
+        "produces":[
+          "application/json"
+        ],
+        "parameters":[
+          {
+            "name":"peer",
+            "description":"The stream peer",
+            "required":true,
+            "allowMultiple":false,
+            "type":"string",
+            "paramType":"path"
+          }
+        ]
+      }
+    ]
+  },
+  {
+    "path":"/stream_manager/metrics/outgoing",
+    "operations":[
+      {
+        "method":"GET",
+        "summary":"Get all total outgoing bytes",
+        "type":"int",
+        "nickname":"get_all_total_outgoing_bytes",
+        "produces":[
+          "application/json"
+        ],
+        "parameters":[
+        ]
+      }
+    ]
+  }
 ],
 "models":{
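A usage sketch for the new stream-manager metrics (the peer address 192.168.1.2 is hypothetical; host and port are an assumption):

    # Number of active outbound streams, then incoming bytes from one peer
    curl -s "http://localhost:10000/stream_manager/metrics/outbound"
    curl -s "http://localhost:10000/stream_manager/metrics/incoming/192.168.1.2"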
api/api-doc/system.json (new file, 114 lines)
@@ -0,0 +1,114 @@
{
  "apiVersion":"0.0.1",
  "swaggerVersion":"1.2",
  "basePath":"{{Protocol}}://{{Host}}",
  "resourcePath":"/system",
  "produces":[
    "application/json"
  ],
  "apis":[
    {
      "path":"/system/logger",
      "operations":[
        {
          "method":"GET",
          "summary":"Get all logger names",
          "type":"array",
          "items":{
            "type":"string"
          },
          "nickname":"get_all_logger_names",
          "produces":[
            "application/json"
          ],
          "parameters":[
          ]
        },
        {
          "method":"POST",
          "summary":"Set all logger level",
          "type":"void",
          "nickname":"set_all_logger_level",
          "produces":[
            "application/json"
          ],
          "parameters":[
            {
              "name":"level",
              "description":"The new log level",
              "required":true,
              "allowMultiple":false,
              "type":"string",
              "enum":[
                "error",
                "warn",
                "info",
                "debug",
                "trace"
              ],
              "paramType":"query"
            }
          ]
        }
      ]
    },
    {
      "path":"/system/logger/{name}",
      "operations":[
        {
          "method":"GET",
          "summary":"Get logger level",
          "type":"string",
          "nickname":"get_logger_level",
          "produces":[
            "application/json"
          ],
          "parameters":[
            {
              "name":"name",
              "description":"The logger to query about",
              "required":true,
              "allowMultiple":false,
              "type":"string",
              "paramType":"path"
            }
          ]
        },
        {
          "method":"POST",
          "summary":"Set logger level",
          "type":"void",
          "nickname":"set_logger_level",
          "produces":[
            "application/json"
          ],
          "parameters":[
            {
              "name":"name",
              "description":"The logger to query about",
              "required":true,
              "allowMultiple":false,
              "type":"string",
              "paramType":"path"
            },
            {
              "name":"level",
              "description":"The new log level",
              "required":true,
              "allowMultiple":false,
              "type":"string",
              "enum":[
                "error",
                "warn",
                "info",
                "debug",
                "trace"
              ],
              "paramType":"query"
            }
          ]
        }
      ]
    }
  ]
}
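A usage sketch for the new logger API (the logger name "compaction" is hypothetical; host and port are an assumption):

    # List all loggers, read one logger's level, then raise it to debug
    curl -s "http://localhost:10000/system/logger"
    curl -s "http://localhost:10000/system/logger/compaction"
    curl -s -X POST "http://localhost:10000/system/logger/compaction?level=debug"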
@@ -38,6 +38,7 @@
 #include "hinted_handoff.hh"
 #include "http/exception.hh"
 #include "stream_manager.hh"
+#include "system.hh"

 namespace api {

@@ -108,6 +109,10 @@ future<> set_server(http_context& ctx) {
         rb->register_function(r, "stream_manager",
                 "The stream manager API");
         set_stream_manager(ctx, r);
+        rb->register_function(r, "system",
+                "The system related API");
+        set_system(ctx, r);
+
     });
 }
@@ -370,7 +370,7 @@ void set_column_family(http_context& ctx, routes& r) {
     });

     cf::get_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
-        return get_cf_histogram(ctx, req->param["name"], &column_family::stats::reads);
+        return get_cf_histogram(ctx, req->param["name"], &column_family::stats::writes);
     });

     cf::get_all_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
@@ -615,30 +615,29 @@
         return make_ready_future<json::json_return_type>(0);
     });

-    cf::get_row_cache_hit.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        //auto id = get_uuid(req->param["name"], ctx.db.local());
-        return make_ready_future<json::json_return_type>(0);
+    cf::get_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return map_reduce_cf(ctx, req->param["name"], 0, [](const column_family& cf) {
+            return cf.get_row_cache().stats().hits;
+        }, std::plus<int64_t>());
     });

-    cf::get_all_row_cache_hit.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    cf::get_all_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return map_reduce_cf(ctx, 0, [](const column_family& cf) {
+            return cf.get_row_cache().stats().hits;
+        }, std::plus<int64_t>());
     });

-    cf::get_row_cache_miss.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        //auto id = get_uuid(req->param["name"], ctx.db.local());
-        return make_ready_future<json::json_return_type>(0);
+    cf::get_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return map_reduce_cf(ctx, req->param["name"], 0, [](const column_family& cf) {
+            return cf.get_row_cache().stats().misses;
+        }, std::plus<int64_t>());
     });

-    cf::get_all_row_cache_miss.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    cf::get_all_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return map_reduce_cf(ctx, 0, [](const column_family& cf) {
+            return cf.get_row_cache().stats().misses;
+        }, std::plus<int64_t>());
+
     });

     cf::get_cas_prepare.set(r, [] (std::unique_ptr<request> req) {
@@ -662,41 +661,19 @@
         return make_ready_future<json::json_return_type>(0);
     });

-    cf::get_sstables_per_read_histogram.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        //auto id = get_uuid(req->param["name"], ctx.db.local());
-        std::vector<double> res;
-        return make_ready_future<json::json_return_type>(res);
+    cf::get_sstables_per_read_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return map_reduce_cf(ctx, req->param["name"], sstables::estimated_histogram(0), [](column_family& cf) {
+            return cf.get_stats().estimated_sstable_per_read;
+        },
+        sstables::merge, utils_json::estimated_histogram());
     });

-    cf::get_tombstone_scanned_histogram.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        // FIXME
-        //auto id = get_uuid(req->param["name"], ctx.db.local());
-        httpd::utils_json::histogram res;
-        res.count = 0;
-        res.mean = 0;
-        res.max = 0;
-        res.min = 0;
-        res.sum = 0;
-        res.variance = 0;
-        return make_ready_future<json::json_return_type>(res);
+    cf::get_tombstone_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return get_cf_histogram(ctx, req->param["name"], &column_family::stats::tombstone_scanned);
     });

-    cf::get_live_scanned_histogram.set(r, [] (std::unique_ptr<request> req) {
-        //TBD
-        // FIXME
-        //auto id = get_uuid(req->param["name"], ctx.db.local());
-        //std::vector<double> res;
-        httpd::utils_json::histogram res;
-        res.count = 0;
-        res.mean = 0;
-        res.max = 0;
-        res.min = 0;
-        res.sum = 0;
-        res.variance = 0;
-        return make_ready_future<json::json_return_type>(res);
+    cf::get_live_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
+        return get_cf_histogram(ctx, req->param["name"], &column_family::stats::live_scanned);
     });

     cf::get_col_update_time_delta_histogram.set(r, [] (std::unique_ptr<request> req) {
@@ -85,7 +85,12 @@ void set_compaction_manager(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(0);
     });

+    cm::get_compaction_history.set(r, [] (std::unique_ptr<request> req) {
+        //TBD
+        unimplemented();
+        std::vector<cm::history> res;
+        return make_ready_future<json::json_return_type>(res);
+    });

 }
@@ -27,47 +27,43 @@ namespace api {
 using namespace json;

 void set_gossiper(http_context& ctx, routes& r) {
-    httpd::gossiper_json::get_down_endpoint.set(r, [](std::unique_ptr<request> req) {
-        return gms::get_unreachable_members().then([](std::set<gms::inet_address> res) {
-            return make_ready_future<json::json_return_type>(container_to_vec(res));
-        });
+    httpd::gossiper_json::get_down_endpoint.set(r, [] (const_req req) {
+        auto res = gms::get_local_gossiper().get_unreachable_members();
+        return container_to_vec(res);
     });

-    httpd::gossiper_json::get_live_endpoint.set(r, [](std::unique_ptr<request> req) {
-        return gms::get_live_members().then([](std::set<gms::inet_address> res) {
-            return make_ready_future<json::json_return_type>(container_to_vec(res));
-        });
+    httpd::gossiper_json::get_live_endpoint.set(r, [] (const_req req) {
+        auto res = gms::get_local_gossiper().get_live_members();
+        return container_to_vec(res);
     });

-    httpd::gossiper_json::get_endpoint_downtime.set(r, [](std::unique_ptr<request> req) {
+    httpd::gossiper_json::get_endpoint_downtime.set(r, [] (const_req req) {
+        gms::inet_address ep(req.param["addr"]);
+        return gms::get_local_gossiper().get_endpoint_downtime(ep);
+    });
+
+    httpd::gossiper_json::get_current_generation_number.set(r, [] (std::unique_ptr<request> req) {
         gms::inet_address ep(req->param["addr"]);
-        return gms::get_endpoint_downtime(ep).then([](int64_t res) {
+        return gms::get_local_gossiper().get_current_generation_number(ep).then([] (int res) {
             return make_ready_future<json::json_return_type>(res);
         });
     });

-    httpd::gossiper_json::get_current_generation_number.set(r, [](std::unique_ptr<request> req) {
+    httpd::gossiper_json::get_current_heart_beat_version.set(r, [] (std::unique_ptr<request> req) {
         gms::inet_address ep(req->param["addr"]);
-        return gms::get_current_generation_number(ep).then([](int res) {
-            return make_ready_future<json::json_return_type>(res);
-        });
-    });
-
-    httpd::gossiper_json::get_current_heart_beat_version.set(r, [](std::unique_ptr<request> req) {
-        gms::inet_address ep(req->param["addr"]);
-        return gms::get_current_heart_beat_version(ep).then([](int res) {
+        return gms::get_local_gossiper().get_current_heart_beat_version(ep).then([] (int res) {
             return make_ready_future<json::json_return_type>(res);
         });
     });

     httpd::gossiper_json::assassinate_endpoint.set(r, [](std::unique_ptr<request> req) {
         if (req->get_query_param("unsafe") != "True") {
-            return gms::assassinate_endpoint(req->param["addr"]).then([] {
-                return make_ready_future<json::json_return_type>(json_void());
+            return gms::get_local_gossiper().assassinate_endpoint(req->param["addr"]).then([] {
+                return make_ready_future<json::json_return_type>(json_void());
             });
         }
-        return gms::unsafe_assassinate_endpoint(req->param["addr"]).then([] {
-            return make_ready_future<json::json_return_type>(json_void());
+        return gms::get_local_gossiper().unsafe_assassinate_endpoint(req->param["addr"]).then([] {
+            return make_ready_future<json::json_return_type>(json_void());
         });
     });
 }
@@ -23,6 +23,9 @@
 #include "service/storage_proxy.hh"
 #include "api/api-doc/storage_proxy.json.hh"
+#include "api/api-doc/utils.json.hh"
 #include "service/storage_service.hh"
+#include "db/config.hh"
+#include "utils/histogram.hh"

 namespace api {

@@ -30,6 +33,23 @@ namespace sp = httpd::storage_proxy_json;
 using proxy = service::storage_proxy;
 using namespace json;

+static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, sstables::estimated_histogram proxy::stats::*f) {
+    return ctx.sp.map_reduce0([f](const proxy& p) {return p.get_stats().*f;}, sstables::estimated_histogram(),
+            sstables::merge).then([](const sstables::estimated_histogram& val) {
+        utils_json::estimated_histogram res;
+        res = val;
+        return make_ready_future<json::json_return_type>(res);
+    });
+}
+
+static future<json::json_return_type> total_latency(http_context& ctx, utils::ihistogram proxy::stats::*f) {
+    return ctx.sp.map_reduce0([f](const proxy& p) {return (p.get_stats().*f).mean * (p.get_stats().*f).count;}, 0.0,
+            std::plus<double>()).then([](double val) {
+        int64_t res = val;
+        return make_ready_future<json::json_return_type>(res);
+    });
+}
+
 void set_storage_proxy(http_context& ctx, routes& r) {
     sp::get_total_hints.set(r, [](std::unique_ptr<request> req) {
         //TBD
@@ -39,7 +59,9 @@ void set_storage_proxy(http_context& ctx, routes& r) {

     sp::get_hinted_handoff_enabled.set(r, [](std::unique_ptr<request> req) {
         //TBD
-        unimplemented();
+        // FIXME
+        // hinted handoff is not supported currently,
+        // so we should return false
         return make_ready_future<json::json_return_type>(false);
     });

@@ -96,10 +118,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(0);
     });

-    sp::get_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(1);
+    sp::get_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().request_timeout_in_ms()/1000.0;
     });

     sp::set_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -109,10 +129,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_read_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_read_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().read_request_timeout_in_ms()/1000.0;
     });

     sp::set_read_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -122,10 +140,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_write_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().write_request_timeout_in_ms()/1000.0;
    });

     sp::set_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -135,11 +151,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_counter_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_counter_write_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().counter_write_request_timeout_in_ms()/1000.0;
     });

     sp::set_counter_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
         //TBD
         unimplemented();
@@ -147,10 +162,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_cas_contention_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_cas_contention_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().cas_contention_timeout_in_ms()/1000.0;
     });

     sp::set_cas_contention_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -160,10 +173,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_range_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_range_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().range_request_timeout_in_ms()/1000.0;
     });

     sp::set_range_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -173,10 +184,8 @@ void set_storage_proxy(http_context& ctx, routes& r) {
         return make_ready_future<json::json_return_type>(json_void());
     });

-    sp::get_truncate_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+    sp::get_truncate_rpc_timeout.set(r, [&ctx](const_req req) {
+        return ctx.db.local().get_config().truncate_request_timeout_in_ms()/1000.0;
     });

     sp::set_truncate_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -212,8 +221,14 @@ void set_storage_proxy(http_context& ctx, routes& r) {

     sp::get_schema_versions.set(r, [](std::unique_ptr<request> req) {
         //TBD
-        unimplemented();
+        // FIXME
+        // describe_schema_versions is not implemented yet
+        // this is a work around
         std::vector<sp::mapper_list> res;
+        sp::mapper_list entry;
+        entry.key = boost::lexical_cast<std::string>(utils::fb_utilities::get_broadcast_address());
+        entry.value.push(service::get_local_storage_service().get_schema_version());
+        res.push_back(entry);
         return make_ready_future<json::json_return_type>(res);
     });

@@ -316,6 +331,29 @@ void set_storage_proxy(http_context& ctx, routes& r) {
     sp::get_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
         return sum_histogram_stats(ctx.sp, &proxy::stats::read);
     });
+
+    sp::get_read_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
+        return sum_estimated_histogram(ctx, &proxy::stats::estimated_read);
+    });
+
+    sp::get_read_latency.set(r, [&ctx](std::unique_ptr<request> req) {
+        return total_latency(ctx, &proxy::stats::read);
+    });
+
+    sp::get_write_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
+        return sum_estimated_histogram(ctx, &proxy::stats::estimated_write);
+    });
+
+    sp::get_write_latency.set(r, [&ctx](std::unique_ptr<request> req) {
+        return total_latency(ctx, &proxy::stats::write);
+    });
+
+    sp::get_range_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
+        return sum_histogram_stats(ctx.sp, &proxy::stats::read);
+    });
+
+    sp::get_range_latency.set(r, [&ctx](std::unique_ptr<request> req) {
+        return total_latency(ctx, &proxy::stats::range);
+    });
 }

 }
@@ -30,8 +30,6 @@
 #include "repair/repair.hh"
 #include "locator/snitch_base.hh"
 #include "column_family.hh"
-#include <unordered_map>
-#include "utils/fb_utilities.hh"

 namespace api {

@@ -52,28 +50,25 @@ void set_storage_service(http_context& ctx, routes& r) {
         });
     });

-    ss::get_tokens.set(r, [](std::unique_ptr<request> req) {
-        return service::sorted_tokens().then([](const std::vector<dht::token>& tokens) {
-            return make_ready_future<json::json_return_type>(container_to_vec(tokens));
-        });
+    ss::get_tokens.set(r, [] (const_req req) {
+        auto tokens = service::get_local_storage_service().get_token_metadata().sorted_tokens();
+        return container_to_vec(tokens);
     });

-    ss::get_node_tokens.set(r, [](std::unique_ptr<request> req) {
-        gms::inet_address addr(req->param["endpoint"]);
-        return service::get_tokens(addr).then([](const std::vector<dht::token>& tokens) {
-            return make_ready_future<json::json_return_type>(container_to_vec(tokens));
-        });
+    ss::get_node_tokens.set(r, [] (const_req req) {
+        gms::inet_address addr(req.param["endpoint"]);
+        auto tokens = service::get_local_storage_service().get_token_metadata().get_tokens(addr);
+        return container_to_vec(tokens);
     });

     ss::get_commitlog.set(r, [&ctx](const_req req) {
         return ctx.db.local().commitlog()->active_config().commit_log_location;
     });

-    ss::get_token_endpoint.set(r, [](std::unique_ptr<request> req) {
-        return service::get_token_to_endpoint().then([] (const std::map<dht::token, gms::inet_address>& tokens){
-            std::vector<storage_service_json::mapper> res;
-            return make_ready_future<json::json_return_type>(map_to_key_value(tokens, res));
-        });
+    ss::get_token_endpoint.set(r, [] (const_req req) {
+        auto token_to_ep = service::get_local_storage_service().get_token_metadata().get_token_to_endpoint();
+        std::vector<storage_service_json::mapper> res;
+        return map_to_key_value(token_to_ep, res);
     });

     ss::get_leaving_nodes.set(r, [](const_req req) {
@@ -148,31 +143,16 @@ void set_storage_service(http_context& ctx, routes& r) {
         return get_cf_stats(ctx, &column_family::stats::live_disk_space_used);
     });

-    ss::get_load_map.set(r, [&ctx](std::unique_ptr<request> req) {
-        // FIXME
-        // The function should return a mapping between inet address
-        // and the load (disk space used)
-        // in origin the implementation is based on the load broadcast
-        // we do not currently support.
-        // As a workaround, the local load is calculated (this part is similar
-        // to origin) and a map with a single entry is return.
-        return ctx.db.map_reduce0([](database& db) {
-            int64_t res = 0;
-            for (auto i : db.get_column_families()) {
-                res += i.second->get_stats().live_disk_space_used;
-            }
-            return res;
-        }, 0, std::plus<int64_t>()).then([](int64_t size) {
+    ss::get_load_map.set(r, [] (std::unique_ptr<request> req) {
+        return service::get_local_storage_service().get_load_map().then([] (auto&& load_map) {
             std::vector<ss::mapper> res;
-            std::unordered_map<gms::inet_address, double> load_map;
-            load_map[utils::fb_utilities::get_broadcast_address()] = size;
             return make_ready_future<json::json_return_type>(map_to_key_value(load_map, res));
         });
     });

     ss::get_current_generation_number.set(r, [](std::unique_ptr<request> req) {
         gms::inet_address ep(utils::fb_utilities::get_broadcast_address());
-        return gms::get_current_generation_number(ep).then([](int res) {
+        return gms::get_local_gossiper().get_current_generation_number(ep).then([](int res) {
             return make_ready_future<json::json_return_type>(res);
         });
     });
@@ -189,33 +169,61 @@
     });

     ss::get_snapshot_details.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        std::vector<ss::snapshots> res;
-        return make_ready_future<json::json_return_type>(res);
+        return service::get_local_storage_service().get_snapshot_details().then([] (auto result) {
+            std::vector<ss::snapshots> res;
+            for (auto& map: result) {
+                ss::snapshots all_snapshots;
+                all_snapshots.key = map.first;
+
+                std::vector<ss::snapshot> snapshot;
+                for (auto& cf: map.second) {
+                    ss::snapshot s;
+                    s.ks = cf.ks;
+                    s.cf = cf.cf;
+                    s.live = cf.live;
+                    s.total = cf.total;
+                    snapshot.push_back(std::move(s));
+                }
+                all_snapshots.value = std::move(snapshot);
+                res.push_back(std::move(all_snapshots));
+            }
+            return make_ready_future<json::json_return_type>(std::move(res));
+        });
     });

     ss::take_snapshot.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
         auto tag = req->get_query_param("tag");
-        auto keyname = req->get_query_param("kn");
         auto column_family = req->get_query_param("cf");
-        return make_ready_future<json::json_return_type>(json_void());
+
+        std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
+
+        auto resp = make_ready_future<>();
+        if (column_family.empty()) {
+            resp = service::get_local_storage_service().take_snapshot(tag, keynames);
+        } else {
+            if (keynames.size() > 1) {
+                throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
+            }
+            resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_family, tag);
+        }
+        return resp.then([] {
+            return make_ready_future<json::json_return_type>(json_void());
+        });
     });

     ss::del_snapshot.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
         auto tag = req->get_query_param("tag");
-        auto keyname = req->get_query_param("kn");
-        return make_ready_future<json::json_return_type>(json_void());
+
+        std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
+        return service::get_local_storage_service().clear_snapshot(tag, keynames).then([] {
+            return make_ready_future<json::json_return_type>(json_void());
+        });
     });

     ss::true_snapshots_size.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(0);
+        return service::get_local_storage_service().true_snapshots_size().then([] (int64_t size) {
+            return make_ready_future<json::json_return_type>(size);
+        });
     });

     ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
@@ -338,9 +346,8 @@
     });

     ss::remove_node.set(r, [](std::unique_ptr<request> req) {
-        // FIXME: This api is incorrect. remove_node takes a host id string parameter instead of token.
-        auto host_id = req->get_query_param("token");
-        return service::get_local_storage_service().remove_node(std::move(host_id)).then([] {
+        auto host_id = req->get_query_param("host_id");
+        return service::get_local_storage_service().remove_node(host_id).then([] {
             return make_ready_future<json::json_return_type>(json_void());
         });
     });
@@ -440,8 +447,10 @@
         return make_ready_future<json::json_return_type>(json_void());
     });

-    ss::is_initialized.set(r, [](const_req req) {
-        return service::get_local_storage_service().is_initialized();
+    ss::is_initialized.set(r, [](std::unique_ptr<request> req) {
+        return service::get_local_storage_service().is_initialized().then([] (bool initialized) {
+            return make_ready_future<json::json_return_type>(initialized);
+        });
     });

     ss::stop_rpc_server.set(r, [](std::unique_ptr<request> req) {
@@ -456,8 +465,10 @@
         });
     });

-    ss::is_rpc_server_running.set(r, [](const_req req) {
-        return service::get_local_storage_service().is_rpc_server_running();
+    ss::is_rpc_server_running.set(r, [] (std::unique_ptr<request> req) {
+        return service::get_local_storage_service().is_rpc_server_running().then([] (bool running) {
+            return make_ready_future<json::json_return_type>(running);
+        });
     });

     ss::start_native_transport.set(r, [](std::unique_ptr<request> req) {
@@ -472,8 +483,10 @@
         });
     });

-    ss::is_native_transport_running.set(r, [](const_req req) {
-        return service::get_local_storage_service().is_native_transport_running();
+    ss::is_native_transport_running.set(r, [] (std::unique_ptr<request> req) {
+        return service::get_local_storage_service().is_native_transport_running().then([] (bool running) {
+            return make_ready_future<json::json_return_type>(running);
+        });
     });

     ss::join_ring.set(r, [](std::unique_ptr<request> req) {
@@ -575,11 +588,16 @@
     });

     ss::load_new_ss_tables.set(r, [&ctx](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        auto keyspace = validate_keyspace(ctx, req->param);
-        auto column_family = req->get_query_param("cf");
-        return make_ready_future<json::json_return_type>(json_void());
+        auto ks = validate_keyspace(ctx, req->param);
+        auto cf = req->get_query_param("cf");
+        // No need to add the keyspace, since all we want is to avoid always sending this to the same
+        // CPU. Even then I am being overzealous here. This is not something that happens all the time.
+        auto coordinator = std::hash<sstring>()(cf) % smp::count;
+        return service::get_storage_service().invoke_on(coordinator, [ks = std::move(ks), cf = std::move(cf)] (service::storage_service& s) {
+            return s.load_new_sstables(ks, cf);
+        }).then([] {
+            return make_ready_future<json::json_return_type>(json_void());
+        });
     });

     ss::sample_key_range.set(r, [](std::unique_ptr<request> req) {
@@ -631,16 +649,12 @@
         return make_ready_future<json::json_return_type>(json_void());
     });

-    ss::get_cluster_name.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(json_void());
+    ss::get_cluster_name.set(r, [](const_req req) {
+        return gms::get_local_gossiper().get_cluster_name();
     });

-    ss::get_partitioner_name.set(r, [](std::unique_ptr<request> req) {
-        //TBD
-        unimplemented();
-        return make_ready_future<json::json_return_type>(json_void());
+    ss::get_partitioner_name.set(r, [](const_req req) {
+        return gms::get_local_gossiper().get_partitioner_name();
     });

     ss::get_tombstone_warn_threshold.set(r, [](std::unique_ptr<request> req) {
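A hedged sketch of the snapshot workflow implemented above (the /storage_service/snapshots path is an assumption inferred from the handlers, as are the tag and keyspace names):

    # Take a snapshot of keyspaces ks1 and ks2 under one tag; "kn" is comma-separated
    curl -s -X POST "http://localhost:10000/storage_service/snapshots?tag=backup1&kn=ks1,ks2"
    # Delete the same snapshot
    curl -s -X DELETE "http://localhost:10000/storage_service/snapshots?tag=backup1&kn=ks1,ks2"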
@@ -24,6 +24,7 @@
 #include "streaming/stream_result_future.hh"
 #include "api/api-doc/stream_manager.json.hh"
 #include <vector>
+#include "gms/gossiper.hh"

 namespace api {

@@ -97,6 +98,80 @@ void set_stream_manager(http_context& ctx, routes& r) {
             return make_ready_future<json::json_return_type>(res);
         });
     });
+
+    hs::get_all_active_streams_outbound.set(r, [](std::unique_ptr<request> req) {
+        return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
+            return stream.get_initiated_streams().size();
+        }, 0, std::plus<int64_t>()).then([](int64_t res) {
+            return make_ready_future<json::json_return_type>(res);
+        });
+    });
+
+    hs::get_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
+        gms::inet_address ep(req->param["peer"]);
+        utils::UUID plan_id = gms::get_local_gossiper().get_host_id(ep);
+        return streaming::get_stream_manager().map_reduce0([plan_id](streaming::stream_manager& stream) {
+            int64_t res = 0;
+            streaming::stream_result_future* s = stream.get_receiving_stream(plan_id).get();
+            if (s != nullptr) {
+                for (auto si: s->get_coordinator()->get_all_session_info()) {
+                    res += si.get_total_size_received();
+                }
+            }
+            return res;
+        }, 0, std::plus<int64_t>()).then([](int64_t res) {
+            return make_ready_future<json::json_return_type>(res);
+        });
+    });
+
+    hs::get_all_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
+        return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
+            int64_t res = 0;
+            for (auto s : stream.get_receiving_streams()) {
+                if (s.second.get() != nullptr) {
+                    for (auto si: s.second.get()->get_coordinator()->get_all_session_info()) {
+                        res += si.get_total_size_received();
+                    }
+                }
+            }
+            return res;
+        }, 0, std::plus<int64_t>()).then([](int64_t res) {
+            return make_ready_future<json::json_return_type>(res);
+        });
+    });
+
+    hs::get_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
+        gms::inet_address ep(req->param["peer"]);
+        utils::UUID plan_id = gms::get_local_gossiper().get_host_id(ep);
+        return streaming::get_stream_manager().map_reduce0([plan_id](streaming::stream_manager& stream) {
+            int64_t res = 0;
+            streaming::stream_result_future* s = stream.get_sending_stream(plan_id).get();
+            if (s != nullptr) {
+                for (auto si: s->get_coordinator()->get_all_session_info()) {
+                    res += si.get_total_size_received();
+                }
+            }
+            return res;
+        }, 0, std::plus<int64_t>()).then([](int64_t res) {
+            return make_ready_future<json::json_return_type>(res);
+        });
+    });
+
+    hs::get_all_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
+        return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
+            int64_t res = 0;
+            for (auto s : stream.get_initiated_streams()) {
+                if (s.second.get() != nullptr) {
+                    for (auto si: s.second.get()->get_coordinator()->get_all_session_info()) {
+                        res += si.get_total_size_received();
+                    }
+                }
+            }
+            return res;
+        }, 0, std::plus<int64_t>()).then([](int64_t res) {
+            return make_ready_future<json::json_return_type>(res);
+        });
+    });
 }

 }
api/system.cc (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright 2015 Cloudius Systems
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "api/api-doc/system.json.hh"
|
||||
#include "api/api.hh"
|
||||
|
||||
#include "http/exception.hh"
|
||||
#include "log.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace hs = httpd::system_json;
|
||||
|
||||
void set_system(http_context& ctx, routes& r) {
|
||||
hs::get_all_logger_names.set(r, [](const_req req) {
|
||||
return logging::logger_registry().get_all_logger_names();
|
||||
});
|
||||
|
||||
hs::set_all_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
logging::logger_registry().set_all_loggers_level(level);
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
return json::json_void();
|
||||
});
|
||||
|
||||
hs::get_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
return logging::level_name(logging::logger_registry().get_logger_level(req.param["name"]));
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
}
|
||||
// just to keep the compiler happy
|
||||
return sstring();
|
||||
});
|
||||
|
||||
hs::set_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
logging::logger_registry().set_logger_level(req.param["name"], level);
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
return json::json_void();
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
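The two setters rely on boost::lexical_cast to parse the level string; anything unparsable surfaces as a bad_param_exception. A minimal stand-alone sketch of the same parse-or-reject pattern with a plain enum (the enum, its names, and the extractor are illustrative; lexical_cast only needs a stream operator>> to exist):

#include <boost/lexical_cast.hpp>
#include <iostream>
#include <string>

// Sketch: a log level enum with the istream extractor that
// boost::lexical_cast requires. Names and levels are made up.
enum class log_level { error, warn, info, debug };

std::istream& operator>>(std::istream& in, log_level& l) {
    std::string s;
    in >> s;
    if (s == "error") { l = log_level::error; }
    else if (s == "warn") { l = log_level::warn; }
    else if (s == "info") { l = log_level::info; }
    else if (s == "debug") { l = log_level::debug; }
    else { in.setstate(std::ios::failbit); } // makes lexical_cast throw
    return in;
}

int main() {
    auto ok = boost::lexical_cast<log_level>("debug"); // parses
    (void)ok;
    try {
        boost::lexical_cast<log_level>("loud");        // throws
    } catch (const boost::bad_lexical_cast&) {
        // the REST handler maps this to bad_param_exception
    }
}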
api/system.hh (new file, 30 lines)
@@ -0,0 +1,30 @@
/*
 * Copyright 2015 Cloudius Systems
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include "api.hh"

namespace api {

void set_system(http_context& ctx, routes& r);

}
@@ -43,11 +43,13 @@ class caching_options {
            throw exceptions::configuration_exception("Invalid key value: " + k);
        }

        try {
            boost::lexical_cast<unsigned long>(r);
        } catch (boost::bad_lexical_cast& e) {
            if ((r != "ALL") && (r != "NONE")) {
                throw exceptions::configuration_exception("Invalid key value: " + k);
        if ((r == "ALL") || (r == "NONE")) {
            return;
        } else {
            try {
                boost::lexical_cast<unsigned long>(r);
            } catch (boost::bad_lexical_cast& e) {
                throw exceptions::configuration_exception("Invalid key value: " + r);
            }
        }
    }

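The rewritten check accepts "ALL", "NONE", or any unsigned integer, and now reports the offending value r instead of the key k. A stand-alone sketch of the same validation logic (the function name is a made-up illustration, not part of the patch):

#include <boost/lexical_cast.hpp>
#include <stdexcept>
#include <string>

// Hypothetical helper mirroring the hunk above: accept "ALL", "NONE",
// or a non-negative integer, and reject anything else.
void validate_rows_per_partition(const std::string& r) {
    if (r == "ALL" || r == "NONE") {
        return;
    }
    try {
        boost::lexical_cast<unsigned long>(r);
    } catch (boost::bad_lexical_cast&) {
        throw std::invalid_argument("Invalid key value: " + r);
    }
}

int main() {
    validate_rows_per_partition("ALL");      // ok
    validate_rows_per_partition("1000");     // ok
    try {
        validate_rows_per_partition("lots"); // throws
    } catch (const std::invalid_argument&) {}
}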
@@ -29,7 +29,8 @@ enum class compaction_strategy_type {
    null,
    major,
    size_tiered,
    // FIXME: Add support to LevelTiered, and DateTiered.
    leveled,
    // FIXME: Add support to DateTiered.
};

class compaction_strategy_impl;

@@ -54,6 +55,8 @@ public:
        return "MajorCompactionStrategy";
    case compaction_strategy_type::size_tiered:
        return "SizeTieredCompactionStrategy";
    case compaction_strategy_type::leveled:
        return "LeveledCompactionStrategy";
    default:
        throw std::runtime_error("Invalid Compaction Strategy");
    }

@@ -66,6 +69,8 @@ public:
        return compaction_strategy_type::major;
    } else if (name == "SizeTieredCompactionStrategy") {
        return compaction_strategy_type::size_tiered;
    } else if (name == "LeveledCompactionStrategy") {
        return compaction_strategy_type::leveled;
    } else {
        throw exceptions::configuration_exception(sprint("Unable to find compaction strategy class 'org.apache.cassandra.db.compaction.%s'", name));
    }

conf/cassandra-rackdc.properties (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#
# cassandra-rackdc.properties
#
# The lines may include white spaces at the beginning and the end.
# The rack and data center names may also include white spaces.
# All trailing and leading white spaces will be trimmed.
#
# dc=my_data_center
# rack=my_rack
# prefer_local=<false | true>
# dc_suffix=<Data Center name suffix, used by EC2SnitchXXX snitches>
#
configure.py (48 lines changed)
@@ -132,7 +132,7 @@ modes = {
    },
}

urchin_tests = [
scylla_tests = [
    'tests/mutation_test',
    'tests/range_test',
    'tests/types_test',
@@ -152,6 +152,7 @@ urchin_tests = [
    'tests/cql_query_test',
    'tests/storage_proxy_test',
    'tests/mutation_reader_test',
    'tests/key_reader_test',
    'tests/mutation_query_test',
    'tests/row_cache_test',
    'tests/test-serialization',
@@ -169,6 +170,7 @@ urchin_tests = [
    'tests/compound_test',
    'tests/config_test',
    'tests/gossiping_property_file_snitch_test',
    'tests/ec2_snitch_test',
    'tests/snitch_reset_test',
    'tests/network_topology_strategy_test',
    'tests/query_processor_test',
@@ -180,13 +182,14 @@ urchin_tests = [
    'tests/logalloc_test',
    'tests/managed_vector_test',
    'tests/crc_test',
    'tests/flush_queue_test',
]

apps = [
    'scylla',
]

tests = urchin_tests
tests = scylla_tests

all_artifacts = apps + tests

@@ -216,18 +219,20 @@ arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', typ
                        help = 'Enable(1)/disable(0)compiler debug information generation')
arg_parser.add_argument('--static-stdc++', dest = 'staticcxx', action = 'store_true',
                        help = 'Link libgcc and libstdc++ statically')
arg_parser.add_argument('--tests-debuginfo', action = 'store', dest = 'tests_debuginfo', type = int, default = 0,
                        help = 'Enable(1)/disable(0)compiler debug information generation for tests')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()

defines = []
urchin_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
scylla_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'

extra_cxxflags = {}

cassandra_interface = Thrift(source = 'interface/cassandra.thrift', service = 'Cassandra')

urchin_core = (['database.cc',
scylla_core = (['database.cc',
    'schema.cc',
    'bytes.cc',
    'mutation.cc',
@@ -242,6 +247,7 @@ urchin_core = (['database.cc',
    'mutation_partition_serializer.cc',
    'mutation_reader.cc',
    'mutation_query.cc',
    'key_reader.cc',
    'keys.cc',
    'sstables/sstables.cc',
    'sstables/compress.cc',
@@ -266,6 +272,8 @@ urchin_core = (['database.cc',
    'cql3/maps.cc',
    'cql3/functions/functions.cc',
    'cql3/statements/cf_prop_defs.cc',
    'cql3/statements/cf_statement.cc',
    'cql3/statements/create_keyspace_statement.cc',
    'cql3/statements/create_table_statement.cc',
    'cql3/statements/drop_keyspace_statement.cc',
    'cql3/statements/drop_table_statement.cc',
@@ -339,10 +347,12 @@ urchin_core = (['database.cc',
    'gms/gossip_digest_ack.cc',
    'gms/gossip_digest_ack2.cc',
    'gms/endpoint_state.cc',
    'gms/application_state.cc',
    'dht/i_partitioner.cc',
    'dht/murmur3_partitioner.cc',
    'dht/byte_ordered_partitioner.cc',
    'dht/boot_strapper.cc',
    'dht/range_streamer.cc',
    'unimplemented.cc',
    'query.cc',
    'query-result-set.cc',
@@ -356,9 +366,14 @@ urchin_core = (['database.cc',
    'locator/simple_snitch.cc',
    'locator/rack_inferring_snitch.cc',
    'locator/gossiping_property_file_snitch.cc',
    'locator/production_snitch_base.cc',
    'locator/ec2_snitch.cc',
    'locator/ec2_multi_region_snitch.cc',
    'message/messaging_service.cc',
    'service/migration_task.cc',
    'service/storage_service.cc',
    'service/pending_range_calculator_service.cc',
    'service/load_broadcaster.cc',
    'streaming/streaming.cc',
    'streaming/stream_task.cc',
    'streaming/stream_session.cc',
@@ -384,6 +399,7 @@ urchin_core = (['database.cc',
    'init.cc',
    'repair/repair.cc',
    'exceptions/exceptions.cc',
    'dns.cc',
    ]
    + [Antlr3Grammar('cql3/Cql.g')]
    + [Thrift('interface/cassandra.thrift', 'Cassandra')]
@@ -419,22 +435,24 @@ api = ['api/api.cc',
    'api/lsa.cc',
    'api/api-doc/stream_manager.json',
    'api/stream_manager.cc',
    'api/api-doc/system.json',
    'api/system.cc'
]

urchin_tests_dependencies = urchin_core + [
scylla_tests_dependencies = scylla_core + [
    'tests/cql_test_env.cc',
    'tests/cql_assertions.cc',
    'tests/result_set_assertions.cc',
    'tests/mutation_source_test.cc',
]

urchin_tests_seastar_deps = [
scylla_tests_seastar_deps = [
    'seastar/tests/test-utils.cc',
    'seastar/tests/test_runner.cc',
]

deps = {
    'scylla': ['main.cc'] + urchin_core + api,
    'scylla': ['main.cc'] + scylla_core + api,
}

tests_not_using_seastar_test_framework = set([
@@ -464,13 +482,13 @@ tests_not_using_seastar_test_framework = set([
])

for t in tests_not_using_seastar_test_framework:
    if not t in urchin_tests:
        raise Exception("Test %s not found in urchin_tests" % (t))
    if not t in scylla_tests:
        raise Exception("Test %s not found in scylla_tests" % (t))

for t in urchin_tests:
    deps[t] = urchin_tests_dependencies + [t + '.cc']
for t in scylla_tests:
    deps[t] = scylla_tests_dependencies + [t + '.cc']
    if t not in tests_not_using_seastar_test_framework:
        deps[t] += urchin_tests_seastar_deps
        deps[t] += scylla_tests_seastar_deps

deps['tests/sstable_test'] += ['tests/sstable_datafile_test.cc']

@@ -491,6 +509,7 @@ warnings = [w
warnings = ' '.join(warnings)

dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
tests_link_rule = 'link' if args.tests_debuginfo else 'link_stripped'

if args.so:
    args.pie = '-shared'
@@ -531,7 +550,8 @@ if args.dpdk:
elif args.dpdk_target:
    seastar_flags += ['--dpdk-target', args.dpdk_target]

seastar_flags += ['--compiler', args.cxx, '--cflags=-march=nehalem']
seastar_cflags = args.user_cflags + " -march=nehalem"
seastar_flags += ['--compiler', args.cxx, '--cflags=%s' % (seastar_cflags)]

status = subprocess.call(['./configure.py'] + seastar_flags, cwd = 'seastar')

@@ -665,7 +685,7 @@ with open(buildfile, 'w') as f:
        # So we strip the tests by default; The user can very
        # quickly re-link the test unstripped by adding a "_g"
        # to the test name, e.g., "ninja build/release/testname_g"
        f.write('build $builddir/{}/{}: link_stripped.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
        f.write('build $builddir/{}/{}: {}.{} {} {}\n'.format(mode, binary, tests_link_rule, mode, str.join(' ', objs),
                'seastar/build/{}/libseastar.a'.format(mode)))
        if has_thrift:
            f.write(' libs = -lthrift -lboost_system $libs\n')

@@ -1224,8 +1224,8 @@ properties[::shared_ptr<cql3::statements::property_definitions> props]
    ;

property[::shared_ptr<cql3::statements::property_definitions> props]
    : k=ident '=' (simple=propertyValue { try { $props->add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
                  | map=mapLiteral { try { $props->add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } })
    : k=ident '=' simple=propertyValue { try { $props->add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
    | k=ident '=' map=mapLiteral { try { $props->add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
    ;

propertyValue returns [sstring str]

@@ -98,6 +98,10 @@ public:
    execute_internal(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) = 0;

    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const = 0;

    virtual bool depends_on_keyspace(const sstring& ks_name) const = 0;

    virtual bool depends_on_column_family(const sstring& cf_name) const = 0;
};

}

@@ -272,7 +272,7 @@ lists::setter_by_index::execute(mutation& m, const exploded_clustering_prefix& p

    auto existing_list_opt = params.get_prefetched_list(m.key(), row_key, column);
    if (!existing_list_opt) {
        throw exceptions::invalid_request_exception(sprint("List index %d out of bound, list has size 0", idx));
        throw exceptions::invalid_request_exception("Attempted to set an element on a list which is null");
    }
    collection_mutation::view existing_list_ser = *existing_list_opt;
    auto ltype = dynamic_pointer_cast<const list_type_impl>(column.type);

@@ -448,7 +448,7 @@ lists::discarder_by_index::execute(mutation& m, const exploded_clustering_prefix
    auto&& existing_list = params.get_prefetched_list(m.key(), row_key, column);
    int32_t idx = read_simple_exactly<int32_t>(*cvalue->_bytes);
    if (!existing_list) {
        throw exceptions::invalid_request_exception("List does not exist");
        throw exceptions::invalid_request_exception("Attempted to delete an element from a list which is null");
    }
    auto&& deserialized = ltype->deserialize_mutation_form(*existing_list);
    if (idx < 0 || size_t(idx) >= deserialized.cells.size()) {

@@ -56,14 +56,15 @@ using namespace transport::messages;

logging::logger log("query_processor");

distributed<query_processor> _the_query_processor;

const sstring query_processor::CQL_VERSION = "3.2.0";

class query_processor::internal_state {
    service::client_state _cs;
    service::query_state _qs;
public:
    internal_state()
        : _cs(service::client_state::internal_tag()), _qs(_cs) {
        : _qs(service::client_state{service::client_state::internal_tag()}) {
    }
    operator service::query_state&() {
        return _qs;
@@ -72,14 +73,13 @@ public:
        return _qs;
    }
    operator service::client_state&() {
        return _cs;
        return _qs.get_client_state();
    }
    operator const service::client_state&() const {
        return _cs;
        return _qs.get_client_state();
    }

    api::timestamp_type next_timestamp() {
        return _cs.get_timestamp();
        return _qs.get_client_state().get_timestamp();
    }
};

@@ -89,12 +89,23 @@ api::timestamp_type query_processor::next_timestamp() {

query_processor::query_processor(distributed<service::storage_proxy>& proxy,
        distributed<database>& db)
    : _proxy(proxy), _db(db), _internal_state(new internal_state()) {
    : _migration_subscriber{std::make_unique<migration_subscriber>(this)}
    , _proxy(proxy)
    , _db(db)
    , _internal_state(new internal_state())
{
    service::get_local_migration_manager().register_listener(_migration_subscriber.get());
}

query_processor::~query_processor()
{}

future<> query_processor::stop()
{
    service::get_local_migration_manager().unregister_listener(_migration_subscriber.get());
    return make_ready_future<>();
}

future<::shared_ptr<result_message>>
query_processor::process(const sstring_view& query_string, service::query_state& query_state, query_options& options)
{
@@ -212,6 +223,11 @@ query_processor::store_prepared_statement(const std::experimental::string_view&
    }
}

void query_processor::invalidate_prepared_statement(bytes statement_id)
{
    _prepared_statements.erase(statement_id);
}

static bytes md5_calculate(const std::experimental::string_view& s)
{
    constexpr size_t size = CryptoPP::Weak1::MD5::DIGESTSIZE;
@@ -338,5 +354,97 @@ query_processor::process_batch(::shared_ptr<statements::batch_statement> batch,
    return batch->execute(_proxy, query_state, options);
}

query_processor::migration_subscriber::migration_subscriber(query_processor* qp)
    : _qp{qp}
{
}

void query_processor::migration_subscriber::on_create_keyspace(const sstring& ks_name)
{
}

void query_processor::migration_subscriber::on_create_column_family(const sstring& ks_name, const sstring& cf_name)
{
}

void query_processor::migration_subscriber::on_create_user_type(const sstring& ks_name, const sstring& type_name)
{
}

void query_processor::migration_subscriber::on_create_function(const sstring& ks_name, const sstring& function_name)
{
    log.warn("{} event ignored", __func__);
}

void query_processor::migration_subscriber::on_create_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
    log.warn("{} event ignored", __func__);
}

void query_processor::migration_subscriber::on_update_keyspace(const sstring& ks_name)
{
}

void query_processor::migration_subscriber::on_update_column_family(const sstring& ks_name, const sstring& cf_name)
{
}

void query_processor::migration_subscriber::on_update_user_type(const sstring& ks_name, const sstring& type_name)
{
}

void query_processor::migration_subscriber::on_update_function(const sstring& ks_name, const sstring& function_name)
{
}

void query_processor::migration_subscriber::on_update_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
}

void query_processor::migration_subscriber::on_drop_keyspace(const sstring& ks_name)
{
    remove_invalid_prepared_statements(ks_name, std::experimental::nullopt);
}

void query_processor::migration_subscriber::on_drop_column_family(const sstring& ks_name, const sstring& cf_name)
{
    remove_invalid_prepared_statements(ks_name, cf_name);
}

void query_processor::migration_subscriber::on_drop_user_type(const sstring& ks_name, const sstring& type_name)
{
}

void query_processor::migration_subscriber::on_drop_function(const sstring& ks_name, const sstring& function_name)
{
    log.warn("{} event ignored", __func__);
}

void query_processor::migration_subscriber::on_drop_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
    log.warn("{} event ignored", __func__);
}

void query_processor::migration_subscriber::remove_invalid_prepared_statements(sstring ks_name, std::experimental::optional<sstring> cf_name)
{
    std::vector<bytes> invalid;
    for (auto& kv : _qp->_prepared_statements) {
        auto id = kv.first;
        auto stmt = kv.second;
        if (should_invalidate(ks_name, cf_name, stmt->statement)) {
            invalid.emplace_back(id);
        }
    }
    for (auto& id : invalid) {
        get_query_processor().invoke_on_all([id] (auto& qp) {
            qp.invalidate_prepared_statement(id);
        });
    }
}

bool query_processor::migration_subscriber::should_invalidate(sstring ks_name, std::experimental::optional<sstring> cf_name, ::shared_ptr<cql_statement> statement)
{
    return statement->depends_on_keyspace(ks_name) && (!cf_name || statement->depends_on_column_family(*cf_name));
}

}
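Note that remove_invalid_prepared_statements() issues one invoke_on_all() per stale statement and drops the returned future, so the cross-shard erase is fire-and-forget. For contrast, a sketch of the same call that keeps the future so a caller could wait for every shard (the helper name is invented; in the patch the call sits inside the subscriber, which is a friend of query_processor and can therefore reach the private member):

// Sketch only: reuses the distributed<query_processor> declared in this file.
// invoke_on_all() runs the lambda on every shard's instance and resolves
// once all shards have finished erasing the statement.
future<> invalidate_on_all_shards(bytes id) {
    return get_query_processor().invoke_on_all([id] (query_processor& qp) {
        qp.invalidate_prepared_statement(id); // private; callable from a friend
    });
}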
@@ -48,6 +48,7 @@
#include "exceptions/exceptions.hh"
#include "cql3/query_options.hh"
#include "cql3/statements/cf_statement.hh"
#include "service/migration_manager.hh"
#include "service/query_state.hh"
#include "log.hh"
#include "core/distributed.hh"
@@ -61,7 +62,10 @@ class batch_statement;
}

class query_processor {
public:
    class migration_subscriber;
private:
    std::unique_ptr<migration_subscriber> _migration_subscriber;
    distributed<service::storage_proxy>& _proxy;
    distributed<database>& _db;

@@ -423,6 +427,8 @@ private:
    future<::shared_ptr<transport::messages::result_message::prepared>>
    store_prepared_statement(const std::experimental::string_view& query_string, const sstring& keyspace, ::shared_ptr<statements::parsed_statement::prepared> prepared, bool for_thrift);

    void invalidate_prepared_statement(bytes statement_id);

#if 0
    public ResultMessage processPrepared(CQLStatement statement, QueryState queryState, QueryOptions options)
    throws RequestExecutionException, RequestValidationException
@@ -461,111 +467,48 @@ public:
    {
        return meter.measureDeep(key);
    }

    private static class MigrationSubscriber implements IMigrationListener
    {
        private void removeInvalidPreparedStatements(String ksName, String cfName)
        {
            removeInvalidPreparedStatements(preparedStatements.values().iterator(), ksName, cfName);
            removeInvalidPreparedStatements(thriftPreparedStatements.values().iterator(), ksName, cfName);
        }

        private void removeInvalidPreparedStatements(Iterator<ParsedStatement.Prepared> iterator, String ksName, String cfName)
        {
            while (iterator.hasNext())
            {
                if (shouldInvalidate(ksName, cfName, iterator.next().statement))
                    iterator.remove();
            }
        }

        private boolean shouldInvalidate(String ksName, String cfName, CQLStatement statement)
        {
            String statementKsName;
            String statementCfName;

            if (statement instanceof ModificationStatement)
            {
                ModificationStatement modificationStatement = ((ModificationStatement) statement);
                statementKsName = modificationStatement.keyspace();
                statementCfName = modificationStatement.columnFamily();
            }
            else if (statement instanceof SelectStatement)
            {
                SelectStatement selectStatement = ((SelectStatement) statement);
                statementKsName = selectStatement.keyspace();
                statementCfName = selectStatement.columnFamily();
            }
            else
            {
                return false;
            }

            return ksName.equals(statementKsName) && (cfName == null || cfName.equals(statementCfName));
        }

        public void onCreateKeyspace(String ksName) { }
        public void onCreateColumnFamily(String ksName, String cfName) { }
        public void onCreateUserType(String ksName, String typeName) { }
        public void onCreateFunction(String ksName, String functionName) {
            if (Functions.getOverloadCount(new FunctionName(ksName, functionName)) > 1)
            {
                // in case there are other overloads, we have to remove all overloads since argument type
                // matching may change (due to type casting)
                removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, functionName);
                removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, functionName);
            }
        }
        public void onCreateAggregate(String ksName, String aggregateName) {
            if (Functions.getOverloadCount(new FunctionName(ksName, aggregateName)) > 1)
            {
                // in case there are other overloads, we have to remove all overloads since argument type
                // matching may change (due to type casting)
                removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, aggregateName);
                removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, aggregateName);
            }
        }

        public void onUpdateKeyspace(String ksName) { }
        public void onUpdateColumnFamily(String ksName, String cfName) { }
        public void onUpdateUserType(String ksName, String typeName) { }
        public void onUpdateFunction(String ksName, String functionName) { }
        public void onUpdateAggregate(String ksName, String aggregateName) { }

        public void onDropKeyspace(String ksName)
        {
            removeInvalidPreparedStatements(ksName, null);
        }

        public void onDropColumnFamily(String ksName, String cfName)
        {
            removeInvalidPreparedStatements(ksName, cfName);
        }

        public void onDropUserType(String ksName, String typeName) { }
        public void onDropFunction(String ksName, String functionName) {
            removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, functionName);
            removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, functionName);
        }
        public void onDropAggregate(String ksName, String aggregateName)
        {
            removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, aggregateName);
            removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, aggregateName);
        }

        private void removeInvalidPreparedStatementsForFunction(Iterator<ParsedStatement.Prepared> iterator,
                                                                String ksName, String functionName)
        {
            while (iterator.hasNext())
                if (iterator.next().statement.usesFunction(ksName, functionName))
                    iterator.remove();
        }
    }
#endif
public:
    future<> stop() {
        return make_ready_future<>();
    }
    future<> stop();

    friend class migration_subscriber;
};

class query_processor::migration_subscriber : public service::migration_listener {
    query_processor* _qp;
public:
    migration_subscriber(query_processor* qp);

    virtual void on_create_keyspace(const sstring& ks_name) override;
    virtual void on_create_column_family(const sstring& ks_name, const sstring& cf_name) override;
    virtual void on_create_user_type(const sstring& ks_name, const sstring& type_name) override;
    virtual void on_create_function(const sstring& ks_name, const sstring& function_name) override;
    virtual void on_create_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;

    virtual void on_update_keyspace(const sstring& ks_name) override;
    virtual void on_update_column_family(const sstring& ks_name, const sstring& cf_name) override;
    virtual void on_update_user_type(const sstring& ks_name, const sstring& type_name) override;
    virtual void on_update_function(const sstring& ks_name, const sstring& function_name) override;
    virtual void on_update_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;

    virtual void on_drop_keyspace(const sstring& ks_name) override;
    virtual void on_drop_column_family(const sstring& ks_name, const sstring& cf_name) override;
    virtual void on_drop_user_type(const sstring& ks_name, const sstring& type_name) override;
    virtual void on_drop_function(const sstring& ks_name, const sstring& function_name) override;
    virtual void on_drop_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;
private:
    void remove_invalid_prepared_statements(sstring ks_name, std::experimental::optional<sstring> cf_name);
    bool should_invalidate(sstring ks_name, std::experimental::optional<sstring> cf_name, ::shared_ptr<cql_statement> statement);
};

extern distributed<query_processor> _the_query_processor;

inline distributed<query_processor>& get_query_processor() {
    return _the_query_processor;
}

inline query_processor& get_local_query_processor() {
    return _the_query_processor.local();
}

}

@@ -45,6 +45,16 @@ namespace statements {

logging::logger batch_statement::_logger("BatchStatement");

bool batch_statement::depends_on_keyspace(const sstring& ks_name) const
{
    return false;
}

bool batch_statement::depends_on_column_family(const sstring& cf_name) const
{
    return false;
}

}

}

@@ -96,6 +96,10 @@ public:
            || boost::algorithm::any_of(_statements, [&] (auto&& s) { return s->uses_function(ks_name, function_name); });
    }

    virtual bool depends_on_keyspace(const sstring& ks_name) const override;

    virtual bool depends_on_column_family(const sstring& cf_name) const override;

    virtual uint32_t get_bound_terms() override {
        return _bound_terms;
    }

cql3/statements/cf_statement.cc (new file, 83 lines)
@@ -0,0 +1,83 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2014-2015 ScyllaDB
 *
 * Modified by ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cql3/statements/cf_statement.hh"

namespace cql3 {

namespace statements {

cf_statement::cf_statement(::shared_ptr<cf_name> cf_name)
    : _cf_name(std::move(cf_name))
{
}

void cf_statement::prepare_keyspace(const service::client_state& state)
{
    if (!_cf_name->has_keyspace()) {
        // XXX: We explicitly only want to call state.getKeyspace() in this case, as we don't want to throw
        // if not logged in any keyspace but a keyspace is explicitly set on the statement. So don't move
        // the call outside the 'if' or replace the method by 'prepareKeyspace(state.getKeyspace())'
        _cf_name->set_keyspace(state.get_keyspace(), true);
    }
}

void cf_statement::prepare_keyspace(sstring keyspace)
{
    if (!_cf_name->has_keyspace()) {
        _cf_name->set_keyspace(keyspace, true);
    }
}

const sstring& cf_statement::keyspace() const
{
    assert(_cf_name->has_keyspace()); // "The statement hasn't been prepared correctly";
    return _cf_name->get_keyspace();
}

const sstring& cf_statement::column_family() const
{
    return _cf_name->get_column_family();
}

}

}
@@ -57,35 +57,16 @@ class cf_statement : public parsed_statement {
protected:
    ::shared_ptr<cf_name> _cf_name;

    cf_statement(::shared_ptr<cf_name> cf_name)
        : _cf_name(std::move(cf_name))
    { }

    cf_statement(::shared_ptr<cf_name> cf_name);
public:
    virtual void prepare_keyspace(const service::client_state& state) {
        if (!_cf_name->has_keyspace()) {
            // XXX: We explicitely only want to call state.getKeyspace() in this case, as we don't want to throw
            // if not logged in any keyspace but a keyspace is explicitely set on the statement. So don't move
            // the call outside the 'if' or replace the method by 'prepareKeyspace(state.getKeyspace())'
            _cf_name->set_keyspace(state.get_keyspace(), true);
        }
    }
    virtual void prepare_keyspace(const service::client_state& state);

    // Only for internal calls, use the version with ClientState for user queries
    virtual void prepare_keyspace(sstring keyspace) {
        if (!_cf_name->has_keyspace()) {
            _cf_name->set_keyspace(keyspace, true);
        }
    }
    virtual void prepare_keyspace(sstring keyspace);

    virtual const sstring& keyspace() const {
        assert(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly";
        return _cf_name->get_keyspace();
    }
    virtual const sstring& keyspace() const;

    virtual const sstring& column_family() const {
        return _cf_name->get_column_family();
    }
    virtual const sstring& column_family() const;
};

}

cql3/statements/create_keyspace_statement.cc (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 ScyllaDB
 *
 * Modified by ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cql3/statements/create_keyspace_statement.hh"

#include "service/migration_manager.hh"

#include <regex>

namespace cql3 {

namespace statements {

create_keyspace_statement::create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists)
    : _name{name}
    , _attrs{attrs}
    , _if_not_exists{if_not_exists}
{
}

const sstring& create_keyspace_statement::keyspace() const
{
    return _name;
}

void create_keyspace_statement::check_access(const service::client_state& state)
{
    warn(unimplemented::cause::PERMISSIONS);
#if 0
    state.hasAllKeyspacesAccess(Permission.CREATE);
#endif
}

void create_keyspace_statement::validate(distributed<service::storage_proxy>&, const service::client_state& state)
{
    std::string name;
    name.resize(_name.length());
    std::transform(_name.begin(), _name.end(), name.begin(), ::tolower);
    if (name == db::system_keyspace::NAME) {
        throw exceptions::invalid_request_exception("system keyspace is not user-modifiable");
    }
    // keyspace name
    std::regex name_regex("\\w+");
    if (!std::regex_match(name, name_regex)) {
        throw exceptions::invalid_request_exception(sprint("\"%s\" is not a valid keyspace name", _name.c_str()));
    }
    if (name.length() > schema::NAME_LENGTH) {
        throw exceptions::invalid_request_exception(sprint("Keyspace names shouldn't be more than %d characters long (got \"%s\")", schema::NAME_LENGTH, _name.c_str()));
    }

    _attrs->validate();

    if (!bool(_attrs->get_replication_strategy_class())) {
        throw exceptions::configuration_exception("Missing mandatory replication strategy class");
    }
#if 0
    // The strategy is validated through KSMetaData.validate() in announceNewKeyspace below.
    // However, for backward compatibility with thrift, this doesn't validate unexpected options yet,
    // so doing proper validation here.
    AbstractReplicationStrategy.validateReplicationStrategy(name,
                                                            AbstractReplicationStrategy.getClass(attrs.getReplicationStrategyClass()),
                                                            StorageService.instance.getTokenMetadata(),
                                                            DatabaseDescriptor.getEndpointSnitch(),
                                                            attrs.getReplicationOptions());
#endif
}

future<bool> create_keyspace_statement::announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only)
{
    return make_ready_future<>().then([this, is_local_only] {
        return service::get_local_migration_manager().announce_new_keyspace(_attrs->as_ks_metadata(_name), is_local_only);
    }).then_wrapped([this] (auto&& f) {
        try {
            f.get();
            return true;
        } catch (const exceptions::already_exists_exception& e) {
            if (_if_not_exists) {
                return false;
            }
            throw e;
        }
    });
}

shared_ptr<transport::event::schema_change> create_keyspace_statement::change_event()
{
    return make_shared<transport::event::schema_change>(transport::event::schema_change::change_type::CREATED, keyspace());
}

}

}
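The name check above lowercases the keyspace name, then requires it to match \w+ (letters, digits, underscore) and to stay within the schema name-length limit. A stand-alone sketch of that check; the function name and the 48-character limit are assumptions for illustration, not values taken from this patch:

#include <algorithm>
#include <cctype>
#include <regex>
#include <stdexcept>
#include <string>

// Illustrative, self-contained version of the validation above.
void validate_keyspace_name(std::string name) {
    std::transform(name.begin(), name.end(), name.begin(), ::tolower);
    static const std::regex name_regex("\\w+");
    if (!std::regex_match(name, name_regex)) {
        throw std::invalid_argument("\"" + name + "\" is not a valid keyspace name");
    }
    if (name.length() > 48) { // assumed limit for the sketch
        throw std::invalid_argument("keyspace name too long");
    }
}

int main() {
    validate_keyspace_name("MyKeyspace");  // ok (lowercased first)
    try {
        validate_keyspace_name("bad-name"); // '-' fails \w+ and throws
    } catch (const std::invalid_argument&) {}
}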
@@ -41,11 +41,8 @@

#pragma once

#include <regex>

#include "cql3/statements/schema_altering_statement.hh"
#include "cql3/statements/ks_prop_defs.hh"
#include "service/migration_manager.hh"
#include "transport/event.hh"

#include "core/shared_ptr.hh"
@@ -69,22 +66,11 @@ public:
     * @param name the name of the keyspace to create
     * @param attrs map of the raw keyword arguments that followed the <code>WITH</code> keyword.
     */
    create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists)
        : _name{name}
        , _attrs{attrs}
        , _if_not_exists{if_not_exists}
    { }
    create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists);

    virtual const sstring& keyspace() const override {
        return _name;
    }
    virtual const sstring& keyspace() const override;

    virtual void check_access(const service::client_state& state) override {
        warn(unimplemented::cause::PERMISSIONS);
#if 0
        state.hasAllKeyspacesAccess(Permission.CREATE);
#endif
    }
    virtual void check_access(const service::client_state& state) override;

    /**
     * The <code>CqlParser</code> only goes as far as extracting the keyword arguments
@@ -93,58 +79,11 @@ public:
     *
     * @throws InvalidRequestException if arguments are missing or unacceptable
     */
    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override {
        std::string name;
        name.resize(_name.length());
        std::transform(_name.begin(), _name.end(), name.begin(), ::tolower);
        if (name == db::system_keyspace::NAME) {
            throw exceptions::invalid_request_exception("system keyspace is not user-modifiable");
        }
        // keyspace name
        std::regex name_regex("\\w+");
        if (!std::regex_match(name, name_regex)) {
            throw exceptions::invalid_request_exception(sprint("\"%s\" is not a valid keyspace name", _name.c_str()));
        }
        if (name.length() > schema::NAME_LENGTH) {
            throw exceptions::invalid_request_exception(sprint("Keyspace names shouldn't be more than %d characters long (got \"%s\")", schema::NAME_LENGTH, _name.c_str()));
        }
    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;

        _attrs->validate();
    virtual future<bool> announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) override;

        if (!bool(_attrs->get_replication_strategy_class())) {
            throw exceptions::configuration_exception("Missing mandatory replication strategy class");
        }
#if 0
        // The strategy is validated through KSMetaData.validate() in announceNewKeyspace below.
        // However, for backward compatibility with thrift, this doesn't validate unexpected options yet,
        // so doing proper validation here.
        AbstractReplicationStrategy.validateReplicationStrategy(name,
                                                                AbstractReplicationStrategy.getClass(attrs.getReplicationStrategyClass()),
                                                                StorageService.instance.getTokenMetadata(),
                                                                DatabaseDescriptor.getEndpointSnitch(),
                                                                attrs.getReplicationOptions());
#endif
    }

    virtual future<bool> announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) override {
        return make_ready_future<>().then([this, is_local_only] {
            return service::get_local_migration_manager().announce_new_keyspace(_attrs->as_ks_metadata(_name), is_local_only);
        }).then_wrapped([this] (auto&& f) {
            try {
                f.get();
                return true;
            } catch (const exceptions::already_exists_exception& e) {
                if (_if_not_exists) {
                    return false;
                }
                throw e;
            }
        });
    }

    virtual shared_ptr<transport::event::schema_change> change_event() override {
        return make_shared<transport::event::schema_change>(transport::event::schema_change::change_type::CREATED, keyspace());
    }
    virtual shared_ptr<transport::event::schema_change> change_event() override;
};

}

@@ -541,6 +541,14 @@ modification_statement::validate(distributed<service::storage_proxy>&, const ser
    }
}

bool modification_statement::depends_on_keyspace(const sstring& ks_name) const {
    return keyspace() == ks_name;
}

bool modification_statement::depends_on_column_family(const sstring& cf_name) const {
    return column_family() == cf_name;
}

}

}

@@ -188,6 +188,10 @@ public:

    void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;

    virtual bool depends_on_keyspace(const sstring& ks_name) const override;

    virtual bool depends_on_column_family(const sstring& cf_name) const override;

    void add_operation(::shared_ptr<operation> op) {
        if (op->column.is_static()) {
            _sets_static_columns = true;

@@ -47,6 +47,50 @@ namespace cql3 {

namespace statements {

schema_altering_statement::schema_altering_statement()
    : cf_statement{::shared_ptr<cf_name>{}}
    , _is_column_family_level{false}
{
}

schema_altering_statement::schema_altering_statement(::shared_ptr<cf_name> name)
    : cf_statement{std::move(name)}
    , _is_column_family_level{true}
{
}

bool schema_altering_statement::uses_function(const sstring& ks_name, const sstring& function_name) const
{
    return cf_statement::uses_function(ks_name, function_name);
}

bool schema_altering_statement::depends_on_keyspace(const sstring& ks_name) const
{
    return false;
}

bool schema_altering_statement::depends_on_column_family(const sstring& cf_name) const
{
    return false;
}

uint32_t schema_altering_statement::get_bound_terms()
{
    return 0;
}

void schema_altering_statement::prepare_keyspace(const service::client_state& state)
{
    if (_is_column_family_level) {
        cf_statement::prepare_keyspace(state);
    }
}

::shared_ptr<parsed_statement::prepared> schema_altering_statement::prepare(database& db)
{
    return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
}

future<::shared_ptr<messages::result_message>>
schema_altering_statement::execute0(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options, bool is_local_only) {
    // If an IF [NOT] EXISTS clause was used, this may not result in an actual schema change. To avoid doing

@@ -41,16 +41,7 @@

#pragma once

namespace transport {

namespace messages {

class result_message;

}

}

#include "transport/messages_fwd.hh"
#include "transport/event.hh"

#include "cql3/statements/cf_statement.hh"
@@ -76,33 +67,21 @@ private:
    future<::shared_ptr<messages::result_message>>
    execute0(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options, bool);
protected:
    schema_altering_statement()
        : cf_statement{::shared_ptr<cf_name>{}}
        , _is_column_family_level{false}
    { }
    schema_altering_statement();

    schema_altering_statement(::shared_ptr<cf_name> name)
        : cf_statement{std::move(name)}
        , _is_column_family_level{true}
    { }
    schema_altering_statement(::shared_ptr<cf_name> name);

    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
        return cf_statement::uses_function(ks_name, function_name);
    }
    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override;

    virtual uint32_t get_bound_terms() override {
        return 0;
    }
    virtual bool depends_on_keyspace(const sstring& ks_name) const override;

    virtual void prepare_keyspace(const service::client_state& state) override {
        if (_is_column_family_level) {
            cf_statement::prepare_keyspace(state);
        }
    }
    virtual bool depends_on_column_family(const sstring& cf_name) const override;

    virtual ::shared_ptr<prepared> prepare(database& db) override {
        return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
    }
    virtual uint32_t get_bound_terms() override;

    virtual void prepare_keyspace(const service::client_state& state) override;

    virtual ::shared_ptr<prepared> prepare(database& db) override;

    virtual shared_ptr<transport::event::schema_change> change_event() = 0;

@@ -106,6 +106,14 @@ void select_statement::validate(distributed<service::storage_proxy>&, const serv
    // Nothing to do, all validation has been done by raw_statement::prepare()
}

bool select_statement::depends_on_keyspace(const sstring& ks_name) const {
    return keyspace() == ks_name;
}

bool select_statement::depends_on_column_family(const sstring& cf_name) const {
    return column_family() == cf_name;
}

query::partition_slice
select_statement::make_partition_slice(const query_options& options) {
    std::vector<column_id> static_columns;

@@ -132,6 +132,8 @@ public:
    virtual uint32_t get_bound_terms() override;
    virtual void check_access(const service::client_state& state) override;
    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;
    virtual bool depends_on_keyspace(const sstring& ks_name) const;
    virtual bool depends_on_column_family(const sstring& cf_name) const;

    virtual future<::shared_ptr<transport::messages::result_message>> execute(distributed<service::storage_proxy>& proxy,
        service::query_state& state, const query_options& options) override;
@@ -192,18 +194,16 @@ public:
        QueryOptions options = QueryOptions.DEFAULT;
        return process(rows, options, getLimit(options), System.currentTimeMillis());
    }

    public String keyspace()
    {
        return _schema.ks_name;
    }

    public String columnFamily()
    {
        return _schema.cfName;
    }
#endif

    const sstring& keyspace() const {
        return _schema->ks_name();
    }

    const sstring& column_family() const {
        return _schema->cf_name();
    }

    query::partition_slice make_partition_slice(const query_options& options);

#if 0

@@ -68,6 +68,16 @@ bool truncate_statement::uses_function(const sstring& ks_name, const sstring& fu
    return parsed_statement::uses_function(ks_name, function_name);
}

bool truncate_statement::depends_on_keyspace(const sstring& ks_name) const
{
    return false;
}

bool truncate_statement::depends_on_column_family(const sstring& cf_name) const
{
    return false;
}

void truncate_statement::check_access(const service::client_state& state)
{
    warn(unimplemented::cause::AUTH);

@@ -60,6 +60,10 @@ public:

    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override;

    virtual bool depends_on_keyspace(const sstring& ks_name) const override;

    virtual bool depends_on_column_family(const sstring& cf_name) const override;

    virtual void check_access(const service::client_state& state) override;

    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;

@@ -47,6 +47,45 @@ namespace cql3 {

namespace statements {

use_statement::use_statement(sstring keyspace)
    : _keyspace(keyspace)
{
}

uint32_t use_statement::get_bound_terms()
{
    return 0;
}

::shared_ptr<parsed_statement::prepared> use_statement::prepare(database& db)
{
    return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
}

bool use_statement::uses_function(const sstring& ks_name, const sstring& function_name) const
{
    return parsed_statement::uses_function(ks_name, function_name);
}

bool use_statement::depends_on_keyspace(const sstring& ks_name) const
{
    return false;
}

bool use_statement::depends_on_column_family(const sstring& cf_name) const
{
    return false;
}

void use_statement::check_access(const service::client_state& state)
{
    state.validate_login();
}

void use_statement::validate(distributed<service::storage_proxy>&, const service::client_state& state)
{
}

future<::shared_ptr<transport::messages::result_message>>
use_statement::execute(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) {
    state.get_client_state().set_keyspace(proxy.local().get_db(), _keyspace);

@@ -42,17 +42,9 @@
#pragma once

#include "cql3/statements/parsed_statement.hh"
#include "transport/messages_fwd.hh"
#include "cql3/cql_statement.hh"

namespace transport {

namespace messages {

class result_message;

}

}
namespace cql3 {

namespace statements {
@@ -62,49 +54,27 @@ private:
    const sstring _keyspace;

public:
    use_statement(sstring keyspace)
        : _keyspace(keyspace)
    { }
    use_statement(sstring keyspace);

    virtual uint32_t get_bound_terms() override {
        return 0;
    }
    virtual uint32_t get_bound_terms() override;

    virtual ::shared_ptr<prepared> prepare(database& db) override {
        return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
    }
    virtual ::shared_ptr<prepared> prepare(database& db) override;

    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
        return parsed_statement::uses_function(ks_name, function_name);
    }
    virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override;

    virtual void check_access(const service::client_state& state) override {
        state.validate_login();
    }
    virtual bool depends_on_keyspace(const sstring& ks_name) const override;

    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override {
    }
    virtual bool depends_on_column_family(const sstring& cf_name) const override;

    virtual void check_access(const service::client_state& state) override;

    virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;

    virtual future<::shared_ptr<transport::messages::result_message>>
    execute(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) override;

    virtual future<::shared_ptr<transport::messages::result_message>>
    execute_internal(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) override;

#if 0
    virtual future<::shared_ptr<transport::messages::result_message>>
    execute(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) override {
        state.get_client_state().set_keyspace(_keyspace);
        auto result = ::make_shared<transport::messages::result_message::set_keyspace>(_keyspace);
        return make_ready_future<::shared_ptr<transport::messages::result_message>>(result);
    }

    virtual future<::shared_ptr<transport::messages::result_message>>
    execute_internal(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) override {
        // Internal queries are exclusively on the system keyspace and 'use' is thus useless
        throw std::runtime_error("unsupported operation");
    }
#endif
};

}

450
database.cc
450
database.cc
@@ -34,6 +34,7 @@
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include "core/seastar.hh"
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include <seastar/core/rwlock.hh>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include "sstables/sstables.hh"
|
||||
@@ -53,20 +54,41 @@
|
||||
#include "mutation_query.hh"
|
||||
#include "sstable_mutation_readers.hh"
|
||||
#include <core/fstream.hh>
|
||||
#include <seastar/core/enum.hh>
|
||||
#include "utils/latency.hh"
|
||||
#include "utils/flush_queue.hh"
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
logging::logger dblog("database");
|
||||
|
||||
// Slight extension to the flush_queue type.
|
||||
class column_family::memtable_flush_queue : public utils::flush_queue<db::replay_position> {
|
||||
public:
|
||||
template<typename Func, typename Post>
|
||||
auto run_cf_flush(db::replay_position rp, Func&& func, Post&& post) {
|
||||
// special case: empty rp, yet still data.
|
||||
// We generate a few memtables with no valid, "high_rp", yet
|
||||
// still containing data -> actual flush.
|
||||
// And to make matters worse, we can initiate a flush of N such
|
||||
// tables at the same time.
|
||||
// Just queue them at the end of the queue and treat them as such.
|
||||
if (rp == db::replay_position() && !empty()) {
|
||||
rp = highest_key();
|
||||
}
|
||||
return run_with_ordered_post_op(rp, std::forward<Func>(func), std::forward<Post>(post));
|
||||
}
|
||||
};
|
||||
|
||||
column_family::column_family(schema_ptr schema, config config, db::commitlog& cl, compaction_manager& compaction_manager)
|
||||
: _schema(std::move(schema))
|
||||
, _config(std::move(config))
|
||||
, _memtables(make_lw_shared(memtable_list{}))
|
||||
, _sstables(make_lw_shared<sstable_list>())
|
||||
, _cache(_schema, sstables_as_mutation_source(), global_cache_tracker())
|
||||
, _cache(_schema, sstables_as_mutation_source(), sstables_as_key_source(), global_cache_tracker())
|
||||
, _commitlog(&cl)
|
||||
, _compaction_manager(compaction_manager)
|
||||
, _flush_queue(std::make_unique<memtable_flush_queue>())
|
||||
{
|
||||
add_memtable();
|
||||
if (!_config.enable_disk_writes) {
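
An aside on the run_cf_flush() special case above: memtables that hold data but carry a default-constructed replay position are queued behind the current highest key, so ordered post-ops (such as commitlog segment discards) still fire in replay-position order. Below is a minimal standalone sketch of just that queuing rule, with toy stand-ins for db::replay_position and the flush queue; the real utils::flush_queue additionally runs the flush and post-op asynchronously.

```cpp
#include <cassert>
#include <cstdint>
#include <map>

// Hypothetical stand-ins; names and semantics are inferred from the diff
// above, not taken from the real utils/flush_queue.hh.
struct replay_position {
    uint64_t id = 0;
    uint32_t pos = 0;
    friend bool operator==(const replay_position& a, const replay_position& b) {
        return a.id == b.id && a.pos == b.pos;
    }
    friend bool operator<(const replay_position& a, const replay_position& b) {
        return a.id != b.id ? a.id < b.id : a.pos < b.pos;
    }
};

class toy_flush_queue {
    std::map<replay_position, int> _pending; // key -> queued flush count
public:
    bool empty() const { return _pending.empty(); }
    replay_position highest_key() const { return _pending.rbegin()->first; }
    void enqueue(replay_position rp) {
        // The special case from run_cf_flush(): a memtable that holds data
        // but carries a default-constructed rp is queued at the tail, i.e.
        // behind whatever is currently the highest key, so post-ops (like
        // commitlog segment discards) still run in replay-position order.
        if (rp == replay_position{} && !empty()) {
            rp = highest_key();
        }
        ++_pending[rp];
    }
};

int main() {
    toy_flush_queue q;
    q.enqueue({1, 100});
    q.enqueue({1, 200});
    q.enqueue({}); // "empty rp, yet still data" -> queued behind {1, 200}
    assert((q.highest_key() == replay_position{1, 200}));
    return 0;
}
```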

@@ -79,9 +101,10 @@ column_family::column_family(schema_ptr schema, config config, no_commitlog cl,
    , _config(std::move(config))
    , _memtables(make_lw_shared(memtable_list{}))
    , _sstables(make_lw_shared<sstable_list>())
    , _cache(_schema, sstables_as_mutation_source(), global_cache_tracker())
    , _cache(_schema, sstables_as_mutation_source(), sstables_as_key_source(), global_cache_tracker())
    , _commitlog(nullptr)
    , _compaction_manager(compaction_manager)
    , _flush_queue(std::make_unique<memtable_flush_queue>())
{
    add_memtable();
    if (!_config.enable_disk_writes) {

@@ -196,6 +219,24 @@ column_family::make_sstable_reader(const query::partition_range& pr) const {
    }
}

key_source column_family::sstables_as_key_source() const {
    return [this] (const query::partition_range& range) {
        std::vector<key_reader> readers;
        readers.reserve(_sstables->size());
        std::transform(_sstables->begin(), _sstables->end(), std::back_inserter(readers), [&] (auto&& entry) {
            auto& sst = entry.second;
            auto rd = sstables::make_key_reader(_schema, sst, range);
            if (sst->is_shared()) {
                rd = make_filtering_reader(std::move(rd), [] (const dht::decorated_key& dk) {
                    return dht::shard_of(dk.token()) == engine().cpu_id();
                });
            }
            return rd;
        });
        return make_combined_reader(_schema, std::move(readers));
    };
}

// Exposed for testing, not performance critical.
future<column_family::const_mutation_partition_ptr>
column_family::find_partition(const dht::decorated_key& key) const {

@@ -259,8 +300,8 @@ column_family::make_reader(const query::partition_range& range) const {
    // range queries in cache, so that scans can always be satisfied from
    // memtable and cache only, as long as data is not evicted.
    //
    // https://github.com/cloudius-systems/urchin/issues/309
    // https://github.com/cloudius-systems/urchin/issues/185
    // https://github.com/scylladb/scylla/issues/309
    // https://github.com/scylladb/scylla/issues/185

    for (auto&& mt : *_memtables) {
        readers.emplace_back(mt->make_reader(range));

@@ -315,14 +356,17 @@ column_family::for_all_partitions_slow(std::function<bool (const dht::decorated_
}

class lister {
public:
    using dir_entry_types = std::unordered_set<directory_entry_type, enum_hash<directory_entry_type>>;
private:
    file _f;
    std::function<future<> (directory_entry de)> _walker;
    directory_entry_type _expected_type;
    dir_entry_types _expected_type;
    subscription<directory_entry> _listing;
    sstring _dirname;

public:
    lister(file f, directory_entry_type type, std::function<future<> (directory_entry)> walker, sstring dirname)
    lister(file f, dir_entry_types type, std::function<future<> (directory_entry)> walker, sstring dirname)
        : _f(std::move(f))
        , _walker(std::move(walker))
        , _expected_type(type)

@@ -330,13 +374,13 @@ public:
        , _dirname(dirname) {
    }

    static future<> scan_dir(sstring name, directory_entry_type type, std::function<future<> (directory_entry)> walker);
    static future<> scan_dir(sstring name, dir_entry_types type, std::function<future<> (directory_entry)> walker);
protected:
    future<> _visit(directory_entry de) {

        return guarantee_type(std::move(de)).then([this] (directory_entry de) {
            // Hide all synthetic directories and hidden files.
            if ((de.type != _expected_type) || (de.name[0] == '.')) {
            if ((!_expected_type.count(*(de.type))) || (de.name[0] == '.')) {
                return make_ready_future<>();
            }
            return _walker(de);

@@ -359,8 +403,7 @@ private:
};

future<> lister::scan_dir(sstring name, directory_entry_type type, std::function<future<> (directory_entry)> walker) {
future<> lister::scan_dir(sstring name, lister::dir_entry_types type, std::function<future<> (directory_entry)> walker) {
    return engine().open_directory(name).then([type, walker = std::move(walker), name] (file f) {
        auto l = make_lw_shared<lister>(std::move(f), type, walker, name);
        return l->done().then([l] { });
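
The lister change above replaces a single expected directory_entry_type with a set of them, so one walker can accept both files and directories. Below is a small self-contained sketch of the same filtering rule; the enum and enum_hash here are stand-ins, since the real types come from seastar and the scylla tree.

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <unordered_set>

// Stand-in for seastar's directory_entry_type.
enum class directory_entry_type { regular, directory, link };

// Stand-in for enum_hash<>: lets an enum class key into unordered_set.
struct enum_hash {
    std::size_t operator()(directory_entry_type t) const {
        return std::hash<int>()(static_cast<int>(t));
    }
};

using dir_entry_types = std::unordered_set<directory_entry_type, enum_hash>;

// Mirrors lister::_visit(): an entry is walked only if its type is in the
// expected set and its name is not hidden (does not start with '.').
bool should_walk(const dir_entry_types& expected, directory_entry_type type,
                 const char* name) {
    return expected.count(type) && name[0] != '.';
}

int main() {
    dir_entry_types dir_and_files = { directory_entry_type::regular,
                                      directory_entry_type::directory };
    assert(should_walk(dir_and_files, directory_entry_type::regular, "ks-cf-ka-1-Data.db"));
    assert(!should_walk(dir_and_files, directory_entry_type::link, "foo"));
    assert(!should_walk(dir_and_files, directory_entry_type::directory, ".hidden"));
    return 0;
}
```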

@@ -386,8 +429,7 @@ future<sstables::entry_descriptor> column_family::probe_file(sstring sstdir, sst
        return make_ready_future<entry_descriptor>(std::move(comps));
    }

    // Make sure new sstables don't overwrite this one.
    _sstable_generation = std::max<uint64_t>(_sstable_generation, comps.generation / smp::count + 1);
    update_sstables_known_generation(comps.generation);
    assert(_sstables->count(comps.generation) == 0);

    auto sst = std::make_unique<sstables::sstable>(_schema->ks_name(), _schema->cf_name(), sstdir, comps.generation, comps.version, comps.format);

@@ -477,8 +519,17 @@ column_family::seal_active_memtable() {
    );
    _highest_flushed_rp = old->replay_position();

    return seastar::with_gate(_in_flight_seals, [old, this] {
        return flush_memtable_to_sstable(old);
    return _flush_queue->run_cf_flush(old->replay_position(), [old, this] {
        return repeat([this, old] {
            return with_lock(_sstables_lock.for_read(), [this, old] {
                _flush_queue->check_open_gate();
                return try_flush_memtable_to_sstable(old);
            });
        });
    }, [old, this] {
        if (_commitlog) {
            _commitlog->discard_completed_segments(_schema->id(), old->replay_position());
        }
    });
    // FIXME: release commit log
    // FIXME: provide back-pressure to upper layers

@@ -530,26 +581,13 @@ column_family::try_flush_memtable_to_sstable(lw_shared_ptr<memtable> old) {
    try {
        ret.get();

        // FIXME: until the surrounding function returns a future and
        // caller ensures ordering (i.e. finish flushing one or more sequential tables before
        // doing the discard), this below is _not_ correct, since the use of replay_position
        // depends on us reporting the factual highest position we've actually flushed,
        // _and_ all positions (for a given UUID) below having been dealt with.
        //
        // Note that the whole scheme is also dependent on memtables being "allocated" in order,
        // i.e. we may not flush a younger memtable before an older one, and we need to use the
        // highest rp.
        if (_commitlog) {
            _commitlog->discard_completed_segments(_schema->id(), old->replay_position());
        }
        _memtables->erase(boost::range::find(*_memtables, old));
        dblog.debug("Memtable replaced");
        trigger_compaction();

        return make_ready_future<stop_iteration>(stop_iteration::yes);
    } catch (std::exception& e) {
        dblog.error("failed to write sstable: {}", e.what());
    } catch (...) {
        dblog.error("failed to write sstable: unknown error");
        dblog.error("failed to write sstable: {}", std::current_exception());
    }
    return sleep(10s).then([] {
        return make_ready_future<stop_iteration>(stop_iteration::no);

@@ -557,15 +595,6 @@ column_family::try_flush_memtable_to_sstable(lw_shared_ptr<memtable> old) {
    });
}

future<>
column_family::flush_memtable_to_sstable(lw_shared_ptr<memtable> memt) {
    return repeat([this, memt] {
        return seastar::with_gate(_in_flight_seals, [memt, this] {
            return try_flush_memtable_to_sstable(memt);
        });
    });
}

void
column_family::start() {
    // FIXME: add option to disable automatic compaction.

@@ -575,70 +604,136 @@ column_family::start() {
future<>
column_family::stop() {
    seal_active_memtable();

    return _compaction_manager.remove(this).then([this] {
        return _in_flight_seals.close().then([this] {
            return make_ready_future<>();
        return _flush_queue->close();
    });
}

future<std::vector<sstables::entry_descriptor>>
column_family::reshuffle_sstables(int64_t start) {
    struct work {
        int64_t current_gen;
        sstable_list sstables;
        std::unordered_map<int64_t, sstables::entry_descriptor> descriptors;
        std::vector<sstables::entry_descriptor> reshuffled;
        work(int64_t start) : current_gen(start ? start : 1) {}
    };

    return do_with(work(start), [this] (work& work) {
        return lister::scan_dir(_config.datadir, { directory_entry_type::regular }, [this, &work] (directory_entry de) {
            auto comps = sstables::entry_descriptor::make_descriptor(de.name);
            if (comps.component != sstables::sstable::component_type::TOC) {
                return make_ready_future<>();
            } else if (comps.generation < work.current_gen) {
                return make_ready_future<>();
            }
            auto sst = make_lw_shared<sstables::sstable>(_schema->ks_name(), _schema->cf_name(),
                                                         _config.datadir, comps.generation,
                                                         comps.version, comps.format);
            work.sstables.emplace(comps.generation, std::move(sst));
            work.descriptors.emplace(comps.generation, std::move(comps));
            // FIXME: This is the only place in which we actually issue disk activity aside from
            // directory metadata operations.
            //
            // But without the TOC information, we don't know which files we should link.
            // The alternative to that would be to change create link to try creating a
            // link for all possible files and handling the failures gracefully, but that's not
            // exactly fast either.
            //
            // Those SSTables are not known by anyone in the system. So we don't have any kind of
            // object describing them. There isn't too much of a choice.
            return work.sstables[comps.generation]->read_toc();
        }).then([&work] {
            // Note: cannot be parallel because we will be shuffling things around at this stage. Can't race.
            return do_for_each(work.sstables, [&work] (auto& pair) {
                auto&& comps = std::move(work.descriptors.at(pair.first));
                comps.generation = work.current_gen;
                work.reshuffled.push_back(std::move(comps));

                if (pair.first == work.current_gen) {
                    ++work.current_gen;
                    return make_ready_future<>();
                }
                return pair.second->set_generation(work.current_gen++);
            });
        }).then([&work] {
            return make_ready_future<std::vector<sstables::entry_descriptor>>(std::move(work.reshuffled));
        });
    });
}

future<>
column_family::compact_sstables(std::vector<sstables::shared_sstable> sstables) {
    if (!sstables.size()) {
column_family::compact_sstables(sstables::compaction_descriptor descriptor) {
    if (!descriptor.sstables.size()) {
        // if there is nothing to compact, just return.
        return make_ready_future<>();
    }

    auto sstables_to_compact = make_lw_shared<std::vector<sstables::shared_sstable>>(std::move(sstables));
    return with_lock(_sstables_lock.for_read(), [this, descriptor = std::move(descriptor)] {
        auto sstables_to_compact = make_lw_shared<std::vector<sstables::shared_sstable>>(std::move(descriptor.sstables));

    auto new_tables = make_lw_shared<std::vector<
        std::pair<unsigned, sstables::shared_sstable>>>();
    auto create_sstable = [this, new_tables] {
        // FIXME: this generation calculation should be in a function.
        auto gen = _sstable_generation++ * smp::count + engine().cpu_id();
        // FIXME: use "tmp" marker in names of incomplete sstable
        auto sst = make_lw_shared<sstables::sstable>(_schema->ks_name(), _schema->cf_name(), _config.datadir, gen,
            sstables::sstable::version_types::ka,
            sstables::sstable::format_types::big);
        sst->set_unshared();
        new_tables->emplace_back(gen, sst);
        return sst;
    };
    return sstables::compact_sstables(*sstables_to_compact, *this,
        create_sstable).then([this, new_tables, sstables_to_compact] {
    // Build a new list of _sstables: We remove from the existing list the
    // tables we compacted (by now, there might be more sstables flushed
    // later), and we add the new tables generated by the compaction.
    // We create a new list rather than modifying it in-place, so that
    // on-going reads can continue to use the old list.
    auto current_sstables = _sstables;
    _sstables = make_lw_shared<sstable_list>();
        auto new_tables = make_lw_shared<std::vector<
            std::pair<unsigned, sstables::shared_sstable>>>();
        auto create_sstable = [this, new_tables] {
            // FIXME: this generation calculation should be in a function.
            auto gen = _sstable_generation++ * smp::count + engine().cpu_id();
            // FIXME: use "tmp" marker in names of incomplete sstable
            auto sst = make_lw_shared<sstables::sstable>(_schema->ks_name(), _schema->cf_name(), _config.datadir, gen,
                sstables::sstable::version_types::ka,
                sstables::sstable::format_types::big);
            sst->set_unshared();
            new_tables->emplace_back(gen, sst);
            return sst;
        };
        return sstables::compact_sstables(*sstables_to_compact, *this,
            create_sstable, descriptor.max_sstable_bytes, descriptor.level).then([this, new_tables, sstables_to_compact] {
            // Build a new list of _sstables: We remove from the existing list the
            // tables we compacted (by now, there might be more sstables flushed
            // later), and we add the new tables generated by the compaction.
            // We create a new list rather than modifying it in-place, so that
            // on-going reads can continue to use the old list.
            auto current_sstables = _sstables;
            _sstables = make_lw_shared<sstable_list>();

    // zeroing live_disk_space_used and live_sstable_count because the
    // sstable list is re-created below.
    _stats.live_disk_space_used = 0;
    _stats.live_sstable_count = 0;
            // zeroing live_disk_space_used and live_sstable_count because the
            // sstable list is re-created below.
            _stats.live_disk_space_used = 0;
            _stats.live_sstable_count = 0;

    std::unordered_set<sstables::shared_sstable> s(
        sstables_to_compact->begin(), sstables_to_compact->end());
    for (const auto& oldtab : *current_sstables) {
        if (!s.count(oldtab.second)) {
            update_stats_for_new_sstable(oldtab.second->data_size());
            _sstables->emplace(oldtab.first, oldtab.second);
            std::unordered_set<sstables::shared_sstable> s(
                sstables_to_compact->begin(), sstables_to_compact->end());
            for (const auto& oldtab : *current_sstables) {
                if (!s.count(oldtab.second)) {
                    update_stats_for_new_sstable(oldtab.second->data_size());
                    _sstables->emplace(oldtab.first, oldtab.second);
                }

    for (const auto& newtab : *new_tables) {
        // FIXME: rename the new sstable(s). Verify a rename doesn't cause
        // problems for the sstable object.
        update_stats_for_new_sstable(newtab.second->data_size());
        _sstables->emplace(newtab.first, newtab.second);
    }

    for (const auto& oldtab : *sstables_to_compact) {
        oldtab->mark_for_deletion();
    }
            }
        }
        });
    });
}

            for (const auto& newtab : *new_tables) {
                // FIXME: rename the new sstable(s). Verify a rename doesn't cause
                // problems for the sstable object.
                update_stats_for_new_sstable(newtab.second->data_size());
                _sstables->emplace(newtab.first, newtab.second);
            }

            for (const auto& oldtab : *sstables_to_compact) {
                oldtab->mark_for_deletion();
            }
future<>
column_family::load_new_sstables(std::vector<sstables::entry_descriptor> new_tables) {
    return parallel_for_each(new_tables, [this] (auto comps) {
        auto sst = make_lw_shared<sstables::sstable>(_schema->ks_name(), _schema->cf_name(), _config.datadir, comps.generation, comps.version, comps.format);
        return sst->load().then([this, sst] {
            return sst->mutate_sstable_level(0);
        }).then([this, sst] {
            this->add_sstable(sst);
            return make_ready_future<>();
        });
    });
}
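
Both copies of create_sstable above derive new generation numbers as _sstable_generation++ * smp::count + engine().cpu_id(), and update_sstables_known_generation() inverts that with generation / smp::count + 1. Below is a standalone sketch of the arithmetic, assuming plain integers stand in for the shard id and shard count: shard k hands out generations k, k + n, k + 2n, ..., so shards never collide without any cross-shard coordination.

```cpp
#include <cassert>
#include <cstdint>

// Toy model of the per-shard sstable generation scheme from the diff.
struct shard_generations {
    uint64_t next = 1;  // plays the role of _sstable_generation
    unsigned shard;     // plays the role of engine().cpu_id()
    unsigned n_shards;  // plays the role of smp::count

    // Mirrors: gen = _sstable_generation++ * smp::count + engine().cpu_id()
    uint64_t allocate() {
        return next++ * n_shards + shard;
    }

    // Mirrors update_sstables_known_generation(): after seeing an existing
    // sstable on disk, make sure future allocations land above it.
    void observe(uint64_t generation) {
        uint64_t floor = generation / n_shards + 1;
        if (floor > next) {
            next = floor;
        }
    }
};

int main() {
    shard_generations a{1, 0, 2}; // shard 0 of 2 -> 2, 4, 6, ...
    shard_generations b{1, 1, 2}; // shard 1 of 2 -> 3, 5, 7, ...
    assert(a.allocate() == 2);    // 1*2 + 0
    assert(b.allocate() == 3);    // 1*2 + 1
    a.observe(10);                // an existing sstable with generation 10
    assert(a.allocate() >= 10);   // 6*2 + 0 = 12, safely above it
    return 0;
}
```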

@@ -653,7 +748,7 @@ column_family::compact_all_sstables() {
    }
    // FIXME: check if the lower bound min_compaction_threshold() from schema
    // should be taken into account before proceeding with compaction.
    return compact_sstables(std::move(sstables));
    return compact_sstables(sstables::compaction_descriptor(std::move(sstables)));
}

void column_family::start_compaction() {

@@ -728,7 +823,7 @@ future<> column_family::populate(sstring sstdir) {
    auto verifier = make_lw_shared<std::unordered_map<unsigned long, status>>();
    auto descriptor = make_lw_shared<sstable_descriptor>();

    return lister::scan_dir(sstdir, directory_entry_type::regular, [this, sstdir, verifier, descriptor] (directory_entry de) {
    return lister::scan_dir(sstdir, { directory_entry_type::regular }, [this, sstdir, verifier, descriptor] (directory_entry de) {
        // FIXME: The secondary indexes are in this level, but with a directory type, (starting with ".")
        return probe_file(sstdir, de.name).then([verifier, descriptor] (auto entry) {
            if (verifier->count(entry.generation)) {

@@ -835,7 +930,7 @@ future<> database::populate_keyspace(sstring datadir, sstring ks_name) {
        dblog.warn("Skipping undefined keyspace: {}", ks_name);
    } else {
        dblog.info("Populating Keyspace {}", ks_name);
        return lister::scan_dir(ksdir, directory_entry_type::directory, [this, ksdir, ks_name] (directory_entry de) {
        return lister::scan_dir(ksdir, { directory_entry_type::directory }, [this, ksdir, ks_name] (directory_entry de) {
            auto comps = parse_fname(de.name);
            if (comps.size() < 2) {
                dblog.error("Keyspace {}: Skipping malformed CF {} ", ksdir, de.name);

@@ -860,7 +955,7 @@ future<> database::populate_keyspace(sstring datadir, sstring ks_name) {
}

future<> database::populate(sstring datadir) {
    return lister::scan_dir(datadir, directory_entry_type::directory, [this, datadir] (directory_entry de) {
    return lister::scan_dir(datadir, { directory_entry_type::directory }, [this, datadir] (directory_entry de) {
        auto& ks_name = de.name;
        if (ks_name == "system") {
            return make_ready_future<>();

@@ -1163,7 +1258,7 @@ keyspace::column_family_directory(const sstring& name, utils::UUID uuid) const {

future<>
keyspace::make_directory_for_column_family(const sstring& name, utils::UUID uuid) {
    return make_directory(column_family_directory(name, uuid));
    return touch_directory(column_family_directory(name, uuid));
}

no_such_keyspace::no_such_keyspace(const sstring& ks_name)

@@ -1337,6 +1432,9 @@ column_family::query(const query::read_command& cmd, const std::vector<query::pa
        });
    }).finally([lc, this]() mutable {
        _stats.reads.mark(lc);
        if (lc.is_start()) {
            _stats.estimated_read.add(lc.latency_in_nano(), _stats.reads.count);
        }
    });
}

@@ -1547,9 +1645,7 @@ database::stop() {
    return _compaction_manager.stop().then([this] {
        // try to ensure that CL has done disk flushing
        if (_commitlog != nullptr) {
            return _commitlog->shutdown().then([this] {
                return _commitlog->sync_all_segments();
            });
            return _commitlog->shutdown();
        }
        return make_ready_future<>();
    }).then([this] {

@@ -1700,7 +1796,18 @@ future<> column_family::snapshot(sstring name) {
    return parallel_for_each(tables, [name](sstables::shared_sstable sstable) {
        auto dir = sstable->get_dir() + "/snapshots/" + name;
        return recursive_touch_directory(dir).then([sstable, dir] {
            return sstable->create_links(dir);
            return sstable->create_links(dir).then_wrapped([] (future<> f) {
                // If the SSTables are shared, one of the CPUs will fail here.
                // That is completely fine, though. We only need one link.
                try {
                    f.get();
                } catch (std::system_error& e) {
                    if (e.code() != std::error_code(EEXIST, std::system_category())) {
                        throw;
                    }
                }
                return make_ready_future<>();
            });
        });
    }).then([jsondir, &tables] {
        // This is not just an optimization. If we have no files, jsondir may not have been created,
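
The then_wrapped() block added to snapshot() above swallows exactly one failure mode: EEXIST from a second shard racing to create the same hard link. A minimal sketch of that filter follows; link_snapshot_file() is a hypothetical stand-in for sstable->create_links(), introduced only for the example.

```cpp
#include <cerrno>
#include <iostream>
#include <system_error>

// Hypothetical stand-in for the link creation that can race across shards.
void link_snapshot_file(bool already_linked) {
    if (already_linked) {
        throw std::system_error(EEXIST, std::system_category(), "link");
    }
}

// Mirrors the filter in the diff: ignore EEXIST, re-throw everything else.
void link_once(bool already_linked) {
    try {
        link_snapshot_file(already_linked);
    } catch (const std::system_error& e) {
        if (e.code() != std::error_code(EEXIST, std::system_category())) {
            throw; // a real error, e.g. EACCES or ENOSPC
        }
        // Another shard created the link first; one link is all we need.
    }
}

int main() {
    link_once(false); // creates the link
    link_once(true);  // EEXIST is silently ignored
    std::cout << "both calls completed\n";
    return 0;
}
```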

@@ -1710,7 +1817,7 @@ future<> column_family::snapshot(sstring name) {
        } else {
            return make_ready_future<>();
        }
    }).then([this, &tables, jsondir] {
    }).finally([this, &tables, jsondir] {
        auto shard = std::hash<sstring>()(jsondir) % smp::count;
        std::unordered_set<sstring> table_names;
        for (auto& sst : tables) {

@@ -1749,6 +1856,124 @@ future<> column_family::snapshot(sstring name) {
    });
}

future<bool> column_family::snapshot_exists(sstring tag) {
    sstring jsondir = _config.datadir + "/snapshots/";
    return engine().open_directory(std::move(jsondir)).then_wrapped([] (future<file> f) {
        try {
            f.get0();
            return make_ready_future<bool>(true);
        } catch (std::system_error& e) {
            if (e.code() != std::error_code(ENOENT, std::system_category())) {
                throw;
            }
            return make_ready_future<bool>(false);
        }
    });
}

enum class missing { no, yes };
static missing
file_missing(future<> f) {
    try {
        f.get();
        return missing::no;
    } catch (std::system_error& e) {
        if (e.code() != std::error_code(ENOENT, std::system_category())) {
            throw;
        }
        return missing::yes;
    }
}

future<> column_family::clear_snapshot(sstring tag) {
    sstring jsondir = _config.datadir + "/snapshots/";
    sstring parent = _config.datadir;
    if (!tag.empty()) {
        jsondir += tag;
        parent += "/snapshots/";
    }

    lister::dir_entry_types dir_and_files = { directory_entry_type::regular, directory_entry_type::directory };
    return lister::scan_dir(jsondir, dir_and_files, [this, curr_dir = jsondir, dir_and_files, tag] (directory_entry de) {
        // FIXME: We really need a better directory walker. This should eventually be part of the seastar infrastructure.
        // It's hard to write this in a fully recursive manner because we need to keep information about the parent directory,
        // so we can remove the file. For now, we'll take advantage of the fact that we will at most visit 2 levels and keep
        // it ugly but simple.
        auto recurse = make_ready_future<>();
        if (de.type == directory_entry_type::directory) {
            // Should only recurse when tag is empty, meaning delete all snapshots
            if (!tag.empty()) {
                throw std::runtime_error(sprint("Unexpected directory %s found at %s! Aborting", de.name, curr_dir));
            }
            auto newdir = curr_dir + "/" + de.name;
            recurse = lister::scan_dir(newdir, dir_and_files, [this, curr_dir = newdir] (directory_entry de) {
                return remove_file(curr_dir + "/" + de.name);
            });
        }
        return recurse.then([fname = curr_dir + "/" + de.name] {
            return remove_file(fname);
        });
    }).then_wrapped([jsondir] (future<> f) {
        // Fine if the directory does not exist. If it did, we delete it.
        if (file_missing(std::move(f)) == missing::no) {
            return remove_file(jsondir);
        }
        return make_ready_future<>();
    }).then([parent] {
        return sync_directory(parent).then_wrapped([] (future<> f) {
            // Should always exist for empty tags, but may not exist for a single tag if we never took
            // snapshots. We will check this here just to mask out the exception, without silencing
            // unexpected ones.
            file_missing(std::move(f));
            return make_ready_future<>();
        });
    });
}

future<std::unordered_map<sstring, column_family::snapshot_details>> column_family::get_snapshot_details() {
    std::unordered_map<sstring, snapshot_details> all_snapshots;
    return do_with(std::move(all_snapshots), [this] (auto& all_snapshots) {
        return lister::scan_dir(_config.datadir + "/snapshots", { directory_entry_type::directory }, [this, &all_snapshots] (directory_entry de) {
            auto snapshot_name = de.name;
            auto snapshot = _config.datadir + "/snapshots/" + snapshot_name;
            all_snapshots.emplace(snapshot_name, snapshot_details());
            return lister::scan_dir(snapshot, { directory_entry_type::regular }, [this, &all_snapshots, snapshot, snapshot_name] (directory_entry de) {
                return file_size(snapshot + "/" + de.name).then([this, &all_snapshots, snapshot_name, name = de.name] (auto size) {
                    // The manifest is the only file expected to be in this directory not belonging to the SSTable.
                    // For it, we account the total size, but zero it for the true size calculation.
                    //
                    // All the others should just generate an exception: there is something wrong, so don't blindly
                    // add it to the size.
                    if (name != "manifest.json") {
                        sstables::entry_descriptor::make_descriptor(name);
                        all_snapshots.at(snapshot_name).total += size;
                    } else {
                        size = 0;
                    }
                    return make_ready_future<uint64_t>(size);
                }).then([this, &all_snapshots, snapshot_name, name = de.name] (auto size) {
                    // FIXME: When we support multiple data directories, the file may not necessarily
                    // live in this same location. May have to test others as well.
                    return file_size(_config.datadir + "/" + name).then_wrapped([&all_snapshots, snapshot_name, size] (auto fut) {
                        try {
                            // File exists in the main SSTable directory. Snapshots are not contributing to size
                            fut.get0();
                        } catch (std::system_error& e) {
                            if (e.code() != std::error_code(ENOENT, std::system_category())) {
                                throw;
                            }
                            all_snapshots.at(snapshot_name).live += size;
                        }
                        return make_ready_future<>();
                    });
                });
            });
        }).then([&all_snapshots] {
            return std::move(all_snapshots);
        });
    });
}

future<> column_family::flush() {
    // FIXME: this will synchronously wait for this write to finish, but doesn't guarantee
    // anything about previous writes.
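
get_snapshot_details() above keeps two counters per snapshot: total sums every sstable file under the snapshot directory, while live sums only files whose hard-link target has already vanished from the base data directory, i.e. the space the snapshot alone pins on disk. Below is a standalone sketch of that accounting, with invented file names and sizes; the real code does the lookups asynchronously via file_size() and ENOENT filtering.

```cpp
#include <cassert>
#include <cstdint>
#include <map>
#include <set>
#include <string>

struct snapshot_details {
    int64_t total = 0;
    int64_t live = 0;
};

// snapshot_files: file name -> size, as found under the snapshot directory.
// base_dir_files: names still present in the main data directory.
snapshot_details account(const std::map<std::string, int64_t>& snapshot_files,
                         const std::set<std::string>& base_dir_files) {
    snapshot_details d;
    for (const auto& f : snapshot_files) {
        if (f.first == "manifest.json") {
            continue; // the real code special-cases the manifest; skip it here
        }
        d.total += f.second;
        if (!base_dir_files.count(f.first)) {
            d.live += f.second; // link target was compacted/deleted away
        }
    }
    return d;
}

int main() {
    std::map<std::string, int64_t> snap = {
        {"ks-cf-ka-1-Data.db", 100}, {"ks-cf-ka-2-Data.db", 50}};
    std::set<std::string> base = {"ks-cf-ka-2-Data.db"}; // 1 was compacted away
    auto d = account(snap, base);
    assert(d.total == 150); // everything in the snapshot directory
    assert(d.live == 100);  // only the file the snapshot alone keeps alive
    return 0;
}
```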

@@ -1789,26 +2014,27 @@ void column_family::clear() {
future<db::replay_position> column_family::discard_sstables(db_clock::time_point truncated_at) {
    assert(_stats.pending_compactions == 0);

    db::replay_position rp;
    auto gc_trunc = to_gc_clock(truncated_at);
    return with_lock(_sstables_lock.for_read(), [this, truncated_at] {
        db::replay_position rp;
        auto gc_trunc = to_gc_clock(truncated_at);

    auto pruned = make_lw_shared<sstable_list>();
        auto pruned = make_lw_shared<sstable_list>();

    for (auto& p : *_sstables) {
        if (p.second->max_data_age() <= gc_trunc) {
            rp = std::max(p.second->get_stats_metadata().position, rp);
            p.second->mark_for_deletion();
            continue;
        for (auto& p : *_sstables) {
            if (p.second->max_data_age() <= gc_trunc) {
                rp = std::max(p.second->get_stats_metadata().position, rp);
                p.second->mark_for_deletion();
                continue;
            }
            pruned->emplace(p.first, p.second);
        }
        pruned->emplace(p.first, p.second);
    }

    _sstables = std::move(pruned);
        _sstables = std::move(pruned);

    dblog.debug("cleaning out row cache");
    _cache.clear();

    return make_ready_future<db::replay_position>(rp);
        dblog.debug("cleaning out row cache");
        _cache.clear();
        return make_ready_future<db::replay_position>(rp);
    });
}

database.hh (72 changes)

@@ -32,6 +32,7 @@
#include "utils/hash.hh"
#include "db_clock.hh"
#include "gc_clock.hh"
#include <chrono>
#include "core/distributed.hh"
#include <functional>
#include <cstdint>

@@ -67,6 +68,9 @@
#include "utils/exponential_backoff_retry.hh"
#include "utils/histogram.hh"
#include "sstables/estimated_histogram.hh"
#include "sstables/compaction.hh"
#include "key_reader.hh"
#include <seastar/core/rwlock.hh>

class frozen_mutation;
class reconcilable_result;

@@ -125,8 +129,15 @@ public:
    utils::ihistogram writes{256};
    sstables::estimated_histogram estimated_read;
    sstables::estimated_histogram estimated_write;
    sstables::estimated_histogram estimated_sstable_per_read;
    utils::ihistogram tombstone_scanned;
    utils::ihistogram live_scanned;
};

struct snapshot_details {
    int64_t total;
    int64_t live;
};
private:
    schema_ptr _schema;
    config _config;

@@ -134,8 +145,11 @@ private:
    lw_shared_ptr<memtable_list> _memtables;
    // generation -> sstable. Ordered by key so we can easily get the most recent.
    lw_shared_ptr<sstable_list> _sstables;
    // There are situations in which we need to stop writing sstables. Flushers will take
    // the read lock, and the ones that wish to stop that process will take the write lock.
    rwlock _sstables_lock;
    mutable row_cache _cache; // Cache covers only sstables.
    unsigned _sstable_generation = 1;
    int64_t _sstable_generation = 1;
    unsigned _mutation_count = 0;
    db::replay_position _highest_flushed_rp;
    // Provided by the database that owns this commitlog

@@ -145,15 +159,21 @@ private:
    // Whether or not a cf is queued by its compaction manager.
    bool _compaction_manager_queued = false;
    int _compaction_disabled = 0;
    class memtable_flush_queue;
    std::unique_ptr<memtable_flush_queue> _flush_queue;
private:
    void update_stats_for_new_sstable(uint64_t new_sstable_data_size);
    void add_sstable(sstables::sstable&& sstable);
    void add_sstable(lw_shared_ptr<sstables::sstable> sstable);
    void add_memtable();
    future<> flush_memtable_to_sstable(lw_shared_ptr<memtable> memt);
    future<stop_iteration> try_flush_memtable_to_sstable(lw_shared_ptr<memtable> memt);
    future<> update_cache(memtable&, lw_shared_ptr<sstable_list> old_sstables);
    struct merge_comparator;

    // update the sstable generation, making sure that new sstables don't overwrite this one.
    void update_sstables_known_generation(unsigned generation) {
        _sstable_generation = std::max<uint64_t>(_sstable_generation, generation / smp::count + 1);
    }
private:
    // Creates a mutation reader which covers sstables.
    // Caller needs to ensure that column_family remains live (FIXME: relax this).

@@ -161,7 +181,10 @@ private:
    mutation_reader make_sstable_reader(const query::partition_range& range) const;

    mutation_source sstables_as_mutation_source();
    key_source sstables_as_key_source() const;
    partition_presence_checker make_partition_presence_checker(lw_shared_ptr<sstable_list> old_sstables);
    // We will use highres because hopefully it won't take more than a few usecs
    std::chrono::high_resolution_clock::time_point _sstable_writes_disabled_at;
public:
    // Creates a mutation reader which covers all data sources for this column family.
    // Caller needs to ensure that column_family remains live (FIXME: relax this).

@@ -208,15 +231,52 @@ public:
    void clear(); // discards memtable(s) without flushing them to disk.
    future<db::replay_position> discard_sstables(db_clock::time_point);

    // Important warning: disabling writes will only have an effect in the current shard.
    // The other shards will keep writing tables at will. Therefore, you very likely need
    // to call this separately in all shards first, to guarantee that none of them are writing
    // new data before you can safely assume that the whole node is disabled.
    future<int64_t> disable_sstable_write() {
        _sstable_writes_disabled_at = std::chrono::high_resolution_clock::now();
        return _sstables_lock.write_lock().then([this] {
            return make_ready_future<int64_t>((*_sstables->end()).first);
        });
    }

    // SSTable writes are now allowed again, and generation is updated to new_generation.
    // Returns the amount of microseconds elapsed since we disabled writes.
    std::chrono::high_resolution_clock::duration enable_sstable_write(int64_t new_generation) {
        update_sstables_known_generation(new_generation);
        _sstables_lock.write_unlock();
        return std::chrono::high_resolution_clock::now() - _sstable_writes_disabled_at;
    }

    // Make sure the generation numbers are sequential, starting from "start".
    // Generations before "start" are left untouched.
    //
    // Return the highest generation number seen so far.
    //
    // Word of warning: although this function will reshuffle anything over "start", it is
    // very dangerous to do that with live SSTables. This is meant to be used with SSTables
    // that are not yet managed by the system.
    //
    // An example usage would query all shards asking what is the highest SSTable number known
    // to them, and then pass that + 1 as "start".
    future<std::vector<sstables::entry_descriptor>> reshuffle_sstables(int64_t start);

    // FIXME: this is just an example, should be changed to something more
    // general. compact_all_sstables() starts a compaction of all sstables.
    // It doesn't flush the current memtable first. It's just an ad-hoc method,
    // not a real compaction policy.
    future<> compact_all_sstables();
    // Compact all sstables provided in the vector.
    future<> compact_sstables(std::vector<lw_shared_ptr<sstables::sstable>> sstables);
    future<> compact_sstables(sstables::compaction_descriptor descriptor);

    future<bool> snapshot_exists(sstring name);

    future<> load_new_sstables(std::vector<sstables::entry_descriptor> new_tables);
    future<> snapshot(sstring name);
    future<> clear_snapshot(sstring name);
    future<std::unordered_map<sstring, snapshot_details>> get_snapshot_details();

    const bool incremental_backups_enabled() const {
        return _config.enable_incremental_backups;

@@ -586,6 +646,9 @@ column_family::apply(const mutation& m, const db::replay_position& rp) {
    active_memtable().apply(m, rp);
    seal_on_overflow();
    _stats.writes.mark(lc);
    if (lc.is_start()) {
        _stats.estimated_write.add(lc.latency_in_nano(), _stats.writes.count);
    }
}

inline

@@ -617,6 +680,9 @@ column_family::apply(const frozen_mutation& m, const db::replay_position& rp) {
    active_memtable().apply(m, rp);
    seal_on_overflow();
    _stats.writes.mark(lc);
    if (lc.is_start()) {
        _stats.estimated_write.add(lc.latency_in_nano(), _stats.writes.count);
    }
}

future<> update_schema_version_and_announce(distributed<service::storage_proxy>& proxy);

@@ -177,6 +177,15 @@ public:

    stats totals;

    void begin_op() {
        _gate.enter();
        ++totals.pending_operations;
    }
    void end_op() {
        --totals.pending_operations;
        _gate.leave();
    }

    segment_manager(config c)
        : cfg(c), max_size(
            std::min<size_t>(std::numeric_limits<position_type>::max(),

@@ -192,10 +201,14 @@ public:
        if (cfg.commit_log_location.empty()) {
            cfg.commit_log_location = "/var/lib/scylla/commitlog";
        }
        logger.trace("Commitlog maximum disk size: {} MB / cpu ({} cpus)",
            max_disk_size / (1024*1024), smp::count);
        logger.trace("Commitlog {} maximum disk size: {} MB / cpu ({} cpus)",
            cfg.commit_log_location, max_disk_size / (1024 * 1024),
            smp::count);
        _regs = create_counters();
    }
    ~segment_manager() {
        logger.trace("Commitlog {} disposed", cfg.commit_log_location);
    }

    uint64_t next_id() {
        return ++_ids;

@@ -218,7 +231,9 @@ public:
    void on_timer();
    void sync();
    void arm(uint32_t extra = 0) {
        _timer.arm(std::chrono::milliseconds(cfg.commitlog_sync_period_in_ms + extra));
        if (!_shutdown) {
            _timer.arm(std::chrono::milliseconds(cfg.commitlog_sync_period_in_ms + extra));
        }
    }

    std::vector<sstring> get_active_names() const;

@@ -371,6 +386,7 @@ public:
    }
    future<sseg_ptr> flush(uint64_t pos = 0) {
        auto me = shared_from_this();
        assert(!me.owned());
        if (pos == 0) {
            pos = _file_pos;
        }

@@ -378,38 +394,35 @@ public:
            logger.trace("{} already synced! ({} < {})", *this, pos, _flush_pos);
            return make_ready_future<sseg_ptr>(std::move(me));
        }
        logger.trace("Syncing {} -> {}", _flush_pos, pos);
        logger.trace("Syncing {} {} -> {}", *this, _flush_pos, pos);
        // Make sure all disk writes are done.
        // This is not 100% necessary, we really only need the ones below our flush pos,
        // but since we pretty much assume that task ordering will make this the case anyway...

        return _dwrite.write_lock().then(
            [this, me = std::move(me), pos]() mutable {
            [this, me, pos]() mutable {
                _dwrite.write_unlock(); // release it already.
                pos = std::max(pos, _file_pos);
                if (pos <= _flush_pos) {
                    logger.trace("{} already synced! ({} < {})", *this, pos, _flush_pos);
                    return make_ready_future<sseg_ptr>(std::move(me));
                }
                ++_segment_manager->totals.pending_operations;
                return _file.flush().handle_exception([](auto ex) {
                _segment_manager->begin_op();
                return _file.flush().then_wrapped([this, pos, me](auto f) {
                    try {
                        std::rethrow_exception(ex);
                        f.get();
                        // TODO: retry/ignore/fail/stop - optional behaviour in origin.
                        // we fast-fail the whole commit.
                    } catch (std::exception& e) {
                        logger.error("Failed to flush commits to disk: {}", e.what());
                        throw;
                        _flush_pos = std::max(pos, _flush_pos);
                        ++_segment_manager->totals.flush_count;
                        logger.trace("{} synced to {}", *this, _flush_pos);
                        return make_ready_future<sseg_ptr>(std::move(me));
                    } catch (...) {
                        logger.error("Failed to flush commits to disk.");
                        logger.error("Failed to flush commits to disk: {}", std::current_exception());
                        throw;
                    }
                }).then([this, pos, me = std::move(me)]() {
                    _flush_pos = std::max(pos, _flush_pos);
                    ++_segment_manager->totals.flush_count;
                    logger.trace("{} synced to {}", *this, _flush_pos);
                    return make_ready_future<sseg_ptr>(std::move(me));
                }).finally([this] {
                    --_segment_manager->totals.pending_operations;
                }).finally([this, me] {
                    _segment_manager->end_op();
                });
            });
    }

@@ -455,6 +468,8 @@ public:
            _segment_manager->totals.total_size += k;
        }
        auto me = shared_from_this();
        assert(!me.owned());

        if (size == 0) {
            return make_ready_future<sseg_ptr>(std::move(me));
        }

@@ -489,9 +504,9 @@ public:

        // acquire read lock
        return _dwrite.read_lock().then([this, size, off, buf = std::move(buf), me]() mutable {
            ++_segment_manager->totals.pending_operations;
            auto written = make_lw_shared<size_t>(0);
            auto p = buf.get();
            _segment_manager->begin_op();
            return repeat([this, size, off, written, p]() mutable {
                return _file.dma_write(off + *written, p + *written, size - *written).then_wrapped([this, size, written](auto&& f) {
                    try {

@@ -517,11 +532,11 @@ public:
                });
            }).finally([this, buf = std::move(buf)]() mutable {
                _segment_manager->release_buffer(std::move(buf));
                _segment_manager->end_op();
            });
        }).then([me] {
            return make_ready_future<sseg_ptr>(std::move(me));
        }).finally([me, this]() {
            --_segment_manager->totals.pending_operations;
            _dwrite.read_unlock(); // release
        });
    }

@@ -540,18 +555,22 @@ public:
                + std::to_string(_segment_manager->max_mutation_size)));
        }
        // would we make the file too big?
        if (position() + s > _segment_manager->max_size) {
            // do this in next segment instead.
            return finish_and_get_new().then(
                [id, size, func = std::move(func)](auto new_seg) {
                    return new_seg->allocate(id, size, func);
                });
        }
        // enough data?
        if (s > (_buffer.size() - _buf_pos)) {
            // TODO: iff we have too many writes running, maybe we should
            // wait for this?
            cycle(s);
        for (;;) {
            if (position() + s > _segment_manager->max_size) {
                // do this in next segment instead.
                return finish_and_get_new().then(
                    [id, size, func = std::move(func)](auto new_seg) {
                        return new_seg->allocate(id, size, func);
                    });
            }
            // enough data?
            if (s > (_buffer.size() - _buf_pos)) {
                // TODO: iff we have too many writes running, maybe we should
                // wait for this?
                cycle(s);
                continue; // re-check file size overflow
            }
            break;
        }

        _gate.enter(); // this might throw. I guess we accept this?

@@ -629,13 +648,19 @@ public:
    bool is_still_allocating() const {
        return !_closed && position() < _segment_manager->max_size;
    }
    bool is_clean() {
    bool is_clean() const {
        return _cf_dirty.empty();
    }
    bool is_unused() {
    bool is_unused() const {
        return !is_still_allocating() && is_clean();
    }
    bool contains(const replay_position& pos) {
    bool is_flushed() const {
        return position() <= _flush_pos;
    }
    bool can_delete() const {
        return is_unused() && is_flushed();
    }
    bool contains(const replay_position& pos) const {
        return pos.id == _desc.id;
    }
    sstring get_segment_name() const {

@@ -820,8 +845,11 @@ void db::commitlog::segment_manager::flush_segments(bool force) {
future<db::commitlog::segment_manager::sseg_ptr> db::commitlog::segment_manager::allocate_segment(bool active) {
    descriptor d(next_id());
    return engine().open_file_dma(cfg.commit_log_location + "/" + d.filename(), open_flags::wo | open_flags::create).then([this, d, active](file f) {
        auto s = make_lw_shared<segment>(this, d, std::move(f), active);
        return make_ready_future<sseg_ptr>(s);
        // xfs doesn't like files extended beyond eof, so enlarge the file
        return f.truncate(max_size).then([this, d, active, f] () mutable {
            auto s = make_lw_shared<segment>(this, d, std::move(f), active);
            return make_ready_future<sseg_ptr>(s);
        });
    });
}

@@ -890,15 +918,19 @@ std::ostream& db::operator<<(std::ostream& out, const db::replay_position& p) {
}

void db::commitlog::segment_manager::discard_unused_segments() {
    auto i = std::remove_if(_segments.begin(), _segments.end(), [=](auto& s) {
        if (s->is_unused()) {
    logger.trace("Checking for unused segments ({} active)", _segments.size());

    auto i = std::remove_if(_segments.begin(), _segments.end(), [=](auto s) {
        if (s->can_delete()) {
            logger.debug("Segment {} is unused", *s);
            return true;
        }
        if (s->is_still_allocating()) {
            logger.debug("Not safe to delete segment {}; still allocating.", s);
        } else {
        } else if (!s->is_clean()) {
            logger.debug("Not safe to delete segment {}; dirty is {}", s, segment::cf_mark {*s});
        } else {
            logger.debug("Not safe to delete segment {}; disk ops pending", s);
        }
        return false;
    });

@@ -918,12 +950,12 @@ future<> db::commitlog::segment_manager::sync_all_segments() {

future<> db::commitlog::segment_manager::shutdown() {
    if (!_shutdown) {
        _shutdown = true;
        _timer.cancel();
        return _gate.close().then([this] {
            return parallel_for_each(_segments, [this](sseg_ptr s) {
                return s->shutdown();
            });
        _shutdown = true; // no re-arm, no create new segments.
        _timer.cancel(); // no more timer calls
        return parallel_for_each(_segments, [this](sseg_ptr s) {
            return s->shutdown(); // close each segment (no more alloc)
        }).then(std::bind(&segment_manager::sync_all_segments, this)).then([this] { // flush all
            return _gate.close(); // wait for any pending ops
        });
    }
    return make_ready_future<>();

@@ -936,11 +968,9 @@ future<> db::commitlog::segment_manager::shutdown() {
 * Only use from tests.
 */
future<> db::commitlog::segment_manager::clear() {
    logger.debug("Clearing all segments");
    _shutdown = true;
    _timer.cancel();
    flush_segments(true);
    return sync_all_segments().then([this] {
    logger.debug("Clearing commitlog");
    return shutdown().then([this] {
        logger.debug("Clearing all segments");
        for (auto& s : _segments) {
            s->mark_clean();
        }

@@ -951,30 +981,30 @@ future<> db::commitlog::segment_manager::clear() {
 * Called by timer in periodic mode.
 */
void db::commitlog::segment_manager::sync() {
    for (auto& s : _segments) {
    for (auto s : _segments) {
        s->sync(); // we do not care about waiting...
    }
}

void db::commitlog::segment_manager::on_timer() {
    if (cfg.mode != sync_mode::BATCH) {
        sync();
    }
    // IFF a new segment was put in use since last we checked, and we're
    // above threshold, request flush.
    if (_new_counter > 0) {
        auto max = max_disk_size;
        auto cur = totals.total_size_on_disk;
        if (max != 0 && cur >= max) {
            _new_counter = 0;
            logger.debug("Size on disk {} MB exceeds local maximum {} MB", cur / (1024 * 1024), max / (1024 * 1024));
            flush_segments();
        }
    }
    // Gate, because we are starting potentially blocking ops
    // without waiting for them, so segment_manager could be shut down
    // while they are running.
    seastar::with_gate(_gate, [this] {
        if (cfg.mode != sync_mode::BATCH) {
            sync();
        }
        // IFF a new segment was put in use since last we checked, and we're
        // above threshold, request flush.
        if (_new_counter > 0) {
            auto max = max_disk_size;
            auto cur = totals.total_size_on_disk;
            if (max != 0 && cur >= max) {
                _new_counter = 0;
                logger.debug("Size on disk {} MB exceeds local maximum {} MB", cur / (1024 * 1024), max / (1024 * 1024));
                flush_segments();
            }
        }
    // take outstanding allocations into regard. This is paranoid,
    // but if for some reason the file::open takes longer than timer period,
    // we could flood the reserve list with new segments

@@ -1190,6 +1220,10 @@ db::commitlog::read_log_file(file f, commit_load_reader_func next, position_type
            });
        });
    }
    future<> stop() {
        eof = true;
        return make_ready_future<>();
    }
    future<> read_header() {
        return fin.read_exactly(segment::descriptor_header_size).then([this](temporary_buffer<char> buf) {
            if (!advance(buf)) {

@@ -1202,6 +1236,12 @@ db::commitlog::read_log_file(file f, commit_load_reader_func next, position_type
            auto id = in.read<uint64_t>();
            auto checksum = in.read<uint32_t>();

            if (ver == 0 && id == 0 && checksum == 0) {
                // let's assume this was an empty (pre-allocated)
                // file. just skip it.
                return stop();
            }

            crc32_nbo crc;
            crc.process(ver);
            crc.process<int32_t>(id & 0xffffffff);
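
Since allocate_segment() now truncates each segment file up to max_size, replay can run into the zero-filled pre-allocated tail; the checks added in this and the next hunk treat an all-zero header as end-of-data rather than corruption. A minimal sketch of that convention follows; the header layout here is illustrative, not the real on-disk format.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Illustrative header layout; the real commitlog format differs.
struct segment_header {
    uint32_t ver;
    uint64_t id;
    uint32_t checksum;
};

// An all-zero header means "empty pre-allocated space": stop replaying
// instead of reporting a CRC failure.
bool is_preallocated_eof(const segment_header& h) {
    return h.ver == 0 && h.id == 0 && h.checksum == 0;
}

int main() {
    unsigned char zeros[sizeof(segment_header)];
    std::memset(zeros, 0, sizeof(zeros));

    segment_header h;
    std::memcpy(&h, zeros, sizeof(h)); // reading from the zero-filled tail
    assert(is_preallocated_eof(h));

    h.id = 42; // any real header carries a nonzero id and checksum
    assert(!is_preallocated_eof(h));
    return 0;
}
```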

@@ -1230,6 +1270,11 @@ db::commitlog::read_log_file(file f, commit_load_reader_func next, position_type
            auto next = in.read<uint32_t>();
            auto checksum = in.read<uint32_t>();

            if (next == 0 && checksum == 0) {
                // in a pre-allocating world, this means eof
                return stop();
            }

            crc32_nbo crc;
            crc.process<int32_t>(id & 0xffffffff);
            crc.process<int32_t>(id >> 32);

@@ -1264,7 +1309,7 @@ db::commitlog::read_log_file(file f, commit_load_reader_func next, position_type
            auto checksum = in.read<uint32_t>();

            if (size == 0) {
                // special urchin case: zero padding due to dma blocks
                // special scylla case: zero padding due to dma blocks
                auto slack = next - pos;
                return skip(slack);
            }

db/config.hh (30 changes)

@@ -23,6 +23,7 @@
#pragma once

#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <unordered_map>
#include "core/sstring.hh"
#include "core/future.hh"

@@ -112,6 +113,32 @@ public:
    future<> read_from_file(const sstring&);
    future<> read_from_file(file);

    /**
     * Scans the environment variables for the configuration files directory
     * definition. It's either $SCYLLA_CONF, $SCYLLA_HOME/conf, or "conf" if none
     * of SCYLLA_CONF and SCYLLA_HOME is defined.
     *
     * @return path of the directory where configuration files are located
     * according to the environment variable definitions.
     */
    static boost::filesystem::path get_conf_dir() {
        using namespace boost::filesystem;

        path confdir;
        auto* cd = std::getenv("SCYLLA_CONF");
        if (cd != nullptr) {
            confdir = path(cd);
        } else {
            auto* p = std::getenv("SCYLLA_HOME");
            if (p != nullptr) {
                confdir = path(p);
            }
            confdir /= "conf";
        }

        return confdir;
    }

    typedef std::unordered_map<sstring, sstring> string_map;
    typedef std::vector<sstring> string_list;
    using seed_provider_type = db::seed_provider_type;
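
The resolution order of get_conf_dir() above, restated as a standalone sketch: $SCYLLA_CONF wins outright, then $SCYLLA_HOME/conf, then the relative directory "conf". This version uses std::filesystem instead of boost::filesystem so it compiles on its own.

```cpp
#include <cstdlib>
#include <filesystem>
#include <iostream>

// Mirrors db::config::get_conf_dir() from the hunk above.
std::filesystem::path conf_dir() {
    if (const char* cd = std::getenv("SCYLLA_CONF")) {
        return std::filesystem::path(cd); // explicit override wins
    }
    std::filesystem::path dir;
    if (const char* home = std::getenv("SCYLLA_HOME")) {
        dir = home;
    }
    return dir / "conf"; // "" / "conf" degrades to the plain "conf" fallback
}

int main() {
    // e.g. SCYLLA_HOME=/opt/scylla ./a.out  prints  "/opt/scylla/conf"
    std::cout << conf_dir() << '\n';
    return 0;
}
```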
|
||||
@@ -132,7 +159,7 @@ public:
|
||||
* type: is the value type (bool, uint32_t etc)
|
||||
* status: is the current _usage_ of the opt. I.e. if you actually use the value, set it to "Used".
|
||||
* Most values are set to "Unused", as in "will probably have an effect eventually".
|
||||
* Values set to "Invalid" have no meaning/usage in urchin, and should (and will currently)
|
||||
* Values set to "Invalid" have no meaning/usage in scylla, and should (and will currently)
|
||||
* be signaled to a user providing a config with them, that these settings are pointless.
|
||||
* desc: documentation.
|
||||
* value...: enumerated valid values if any. Not currently used, but why not...
|
||||
@@ -715,6 +742,7 @@ public:
|
||||
val(api_address, sstring, "", Used, "Http Rest API address") \
|
||||
val(api_ui_dir, sstring, "swagger-ui/dist/", Used, "The directory location of the API GUI") \
|
||||
val(api_doc_dir, sstring, "api/api-doc/", Used, "The API definition file directory") \
|
||||
val(load_balance, sstring, "none", Used, "CQL request load balancing: 'none' or round-robin'") \
|
||||
/* done! */
|
||||
|
||||
#define _make_value_member(name, type, deflt, status, desc, ...) \
|
||||
|
||||
@@ -52,6 +52,7 @@
#include "core/do_with.hh"
#include "core/thread.hh"
#include "json.hh"
+#include "log.hh"

#include "db/marshal/type_parser.hh"
#include "db/config.hh"
@@ -67,6 +68,8 @@ using namespace db::system_keyspace;
namespace db {
namespace schema_tables {

+logging::logger logger("schema_tables");
+
std::vector<const char*> ALL { KEYSPACES, COLUMNFAMILIES, COLUMNS, TRIGGERS, USERTYPES, /* not present in 2.1.8: FUNCTIONS, AGGREGATES */ };

using days = std::chrono::duration<int, std::ratio<24 * 3600>>;
@@ -670,6 +673,8 @@ future<> merge_tables(distributed<service::storage_proxy>& proxy, schema_result&
    if (engine().cpu_id() == 0) {
        for (auto&& cfm : created) {
            service::migration_manager::notify_create_column_family(cfm).get0();
+           auto& ks = db.find_keyspace(cfm->ks_name());
+           ks.make_directory_for_column_family(cfm->cf_name(), cfm->id());
        }
        for (auto&& cfm : dropped) {
            service::migration_manager::notify_drop_column_family(cfm).get0();
@@ -1325,9 +1330,15 @@ void create_table_from_table_row_and_column_rows(schema_builder& builder, const
        builder.set_speculative_retry(table_row.get_nonnull<sstring>("speculative_retry"));
    }

-   if (table_row.has("compaction_strategy")) {
-       auto strategy = table_row.get_nonnull<sstring>("compression_strategy_class");
-       builder.set_compaction_strategy(sstables::compaction_strategy::type(strategy));
+   if (table_row.has("compaction_strategy_class")) {
+       auto strategy = table_row.get_nonnull<sstring>("compaction_strategy_class");
+       try {
+           builder.set_compaction_strategy(sstables::compaction_strategy::type(strategy));
+       } catch (const exceptions::configuration_exception& e) {
+           // If compaction strategy class isn't supported, fallback to size tiered.
+           logger.warn("Falling back to size-tiered compaction strategy after the problem: {}", e.what());
+           builder.set_compaction_strategy(sstables::compaction_strategy_type::size_tiered);
+       }
    }

    if (table_row.has("compaction_strategy_options")) {
@@ -505,6 +505,10 @@ future<> setup(distributed<database>& db, distributed<cql3::query_processor>& qp
        return check_health();
    }).then([] {
        return db::schema_tables::save_system_keyspace_schema();
+   }).then([] {
+       return net::get_messaging_service().invoke_on_all([] (auto& ms){
+           return ms.init_local_preferred_ip_cache();
+       });
    });
    return make_ready_future<>();
}
@@ -714,11 +718,28 @@ future<std::unordered_map<gms::inet_address, utils::UUID>> load_host_ids() {

future<> update_preferred_ip(gms::inet_address ep, gms::inet_address preferred_ip) {
    sstring req = "INSERT INTO system.%s (peer, preferred_ip) VALUES (?, ?)";
-   return execute_cql(req, PEERS, ep.addr(), preferred_ip).discard_result().then([] {
+   return execute_cql(req, PEERS, ep.addr(), preferred_ip.addr()).discard_result().then([] {
        return force_blocking_flush(PEERS);
    });
}

+future<std::unordered_map<gms::inet_address, gms::inet_address>> get_preferred_ips() {
+    sstring req = "SELECT peer, preferred_ip FROM system.%s";
+
+    return execute_cql(req, PEERS).then([] (::shared_ptr<cql3::untyped_result_set> cql_res_set) {
+        std::unordered_map<gms::inet_address, gms::inet_address> res;
+
+        for (auto& r : *cql_res_set) {
+            if (r.has("preferred_ip")) {
+                res.emplace(gms::inet_address(r.get_as<net::ipv4_address>("peer")),
+                            gms::inet_address(r.get_as<net::ipv4_address>("preferred_ip")));
+            }
+        }
+
+        return res;
+    });
+}
+
template <typename Value>
static future<> update_cached_values(gms::inet_address ep, sstring column_name, Value value) {
    return make_ready_future<>();
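A hedged usage sketch for the new accessor, in the spirit of the init_local_preferred_ip_cache() call wired into setup() above (assuming, as the surrounding code suggests, that these functions live in db::system_keyspace):

db::system_keyspace::get_preferred_ips().then(
        [] (std::unordered_map<gms::inet_address, gms::inet_address> ips) {
    for (auto& entry : ips) {
        // entry.first is the peer address; entry.second is the IP we
        // should prefer when opening connections to it.
    }
});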
@@ -94,6 +94,7 @@ future<> update_tokens(std::unordered_set<dht::token> tokens);
future<> update_tokens(gms::inet_address ep, std::unordered_set<dht::token> tokens);

future<> update_preferred_ip(gms::inet_address ep, gms::inet_address preferred_ip);
+future<std::unordered_map<gms::inet_address, gms::inet_address>> get_preferred_ips();

template <typename Value>
future<> update_peer_info(gms::inet_address ep, sstring column_name, Value value);
1
debian/compat
vendored
Normal file
@@ -0,0 +1 @@
9
20
debian/control
vendored
Normal file
@@ -0,0 +1,20 @@
Source: scylla-server
Maintainer: Takuya ASADA <syuu@scylladb.com>
Homepage: http://scylladb.com
Section: database
Priority: optional
Standards-Version: 3.9.2
Build-Depends: debhelper (>= 9), libyaml-cpp-dev, liblz4-dev, libsnappy-dev, libcrypto++-dev, libjsoncpp-dev, libaio-dev, libthrift-dev, thrift-compiler, antlr3-tool, antlr3-c++-dev, ragel, g++-4.9, ninja-build, git, libboost-program-options1.55-dev, libboost-filesystem1.55-dev, libboost-system1.55-dev, libboost-thread1.55-dev, libboost-test1.55-dev

Package: scylla-server
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, hugepages
Description: Scylla database server binaries
 Scylla is a highly scalable, eventually consistent, distributed, partitioned row DB.

Package: scylla-server-dbg
Architecture: amd64
Depends: scylla-server (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends}
Description: debugging symbols for scylla-server
 Scylla is a highly scalable, eventually consistent, distributed, partitioned row DB.
 This package contains the debugging symbols for scylla-server.
16
debian/copyright
vendored
Normal file
@@ -0,0 +1,16 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: Scylla DB
Upstream-Contact: http://www.scylladb.com/
Source: https://github.com/scylladb/scylla

Files: *
Copyright: Copyright (C) 2015 ScyllaDB
License: AGPL-3.0

Files: seastar/*
Copyright: Copyright (C) 2015 ScyllaDB
License: Apache

Files: seastar/dpdk/*
Copyright: Copyright(c) 2015 Intel Corporation. All rights reserved.
License: BSD-3-clause
4
debian/limits.d/scylla.conf
vendored
Normal file
@@ -0,0 +1,4 @@
scylla - memlock unlimited
scylla - nofile 100000
scylla - as unlimited
scylla - nproc 8096
50
debian/rules
vendored
Executable file
@@ -0,0 +1,50 @@
#!/usr/bin/make -f

DOC = $(CURDIR)/debian/scylla-server/usr/share/doc/scylla-server
SCRIPTS = $(CURDIR)/debian/scylla-server/usr/lib/scylla
LIMITS= $(CURDIR)/debian/scylla-server/etc/security/limits.d
LIBS = $(CURDIR)/debian/scylla-server/usr/lib

override_dh_auto_build:
        ./configure.py --disable-xen --enable-dpdk --mode=release --static-stdc++ --compiler=g++-4.9
        ninja

override_dh_auto_clean:
        rm -rf build/release seastar/build
        rm -rf Cql.tokens
        rm -rf build.ninja seastar/build.ninja

override_dh_auto_install:
        mkdir -p $(CURDIR)/debian/scylla-server/etc/default/ && \
        cp $(CURDIR)/dist/redhat/sysconfig/scylla-server \
                $(CURDIR)/debian/scylla-server/etc/default/

        mkdir -p $(LIMITS) && \
        cp $(CURDIR)/debian/limits.d/scylla.conf $(LIMITS)

        mkdir -p $(DOC) && \
        cp $(CURDIR)/*.md $(DOC)
        cp $(CURDIR)/NOTICE.txt $(DOC)
        cp $(CURDIR)/ORIGIN $(DOC)
        cp -r $(CURDIR)/licenses $(DOC)

        mkdir -p $(SCRIPTS) && \
        cp $(CURDIR)/seastar/dpdk/tools/dpdk_nic_bind.py $(SCRIPTS)
        cp $(CURDIR)/dist/common/scripts/* $(SCRIPTS)

        mkdir -p $(CURDIR)/debian/scylla-server/usr/bin/ && \
        cp $(CURDIR)/build/release/scylla \
                $(CURDIR)/debian/scylla-server/usr/bin/

        mkdir -p $(CURDIR)/debian/scylla-server/var/lib/scylla/data
        mkdir -p $(CURDIR)/debian/scylla-server/var/lib/scylla/commitlog
        mkdir -p $(CURDIR)/debian/scylla-server/var/lib/scylla/conf
        cp $(CURDIR)/conf/scylla.yaml \
                $(CURDIR)/debian/scylla-server/var/lib/scylla/conf
        cp $(CURDIR)/conf/cassandra-rackdc.properties \
                $(CURDIR)/debian/scylla-server/var/lib/scylla/conf

override_dh_strip:
        dh_strip --dbg-package=scylla-server-dbg
%:
        dh $@
@@ -14,3 +14,4 @@ elif [ "$NETWORK_MODE" = "dpdk" ]; then
        echo $NR_HUGEPAGES > $n/hugepages/hugepages-2048kB/nr_hugepages
    done
fi
+hugeadm --create-mounts
13
debian/scripts/scylla_run
vendored
Executable file
@@ -0,0 +1,13 @@
#!/bin/sh -e

args="--datadir $DATA_DIR --commitlog-directory $COMMITLOG_DIR $SCYLLA_ARGS"

if [ "$NETWORK_MODE" = "virtio" ]; then
    args="$args --network-stack native"
elif [ "$NETWORK_MODE" = "dpdk" ]; then
    args="$args --network-stack native --dpdk-pmd"
elif [ "$NETWORK_MODE" = "posix" ]; then
    args="$args --network-stack posix"
fi

exec sudo -u $USER env HOME=/var/lib/scylla /usr/bin/scylla $args
24
debian/scylla-server.postinst
vendored
Normal file
@@ -0,0 +1,24 @@
#!/bin/sh

set -e

if [ "$1" = configure ]; then
    adduser --system \
            --quiet \
            --home /var/lib/scylla \
            --no-create-home \
            --disabled-password \
            --group scylla
    chown -R scylla:scylla /var/lib/scylla
fi

# Automatically added by dh_installinit
if [ -x "/etc/init.d/scylla-server" ]; then
    if [ ! -e "/etc/init/scylla-server.conf" ]; then
        update-rc.d scylla-server defaults >/dev/null
    fi
fi
# End automatically added section
# Automatically added by dh_installinit
update-rc.d -f scylla-server remove >/dev/null || exit $?
# End automatically added section
9
debian/scylla-server.preinst
vendored
Normal file
@@ -0,0 +1,9 @@
# Automatically added by dh_installinit
if [ "$1" = install ] || [ "$1" = upgrade ]; then
    if [ -e "/etc/init.d/scylla-server" ] && [ -L "/etc/init.d/scylla-server" ] \
       && [ $(readlink -f "/etc/init.d/scylla-server") = /lib/init/upstart-job ]
    then
        rm -f "/etc/init.d/scylla-server"
    fi
fi
# End automatically added section
33
debian/scylla-server.upstart
vendored
Normal file
@@ -0,0 +1,33 @@
# scylla-server - ScyllaDB
#
# ScyllaDB

description "ScyllaDB server"

start on runlevel [2345]
stop on runlevel [!2345]

umask 022

console log

pre-start script
    cd /var/lib/scylla
    . /etc/default/scylla-server
    export OPTS_FILE NETWORK_MODE TAP BRIDGE ETHDRV ETHPCIID NR_HUGEPAGES USER GROUP SCYLLA_ARGS
    /usr/lib/scylla/scylla_prepare
end script

script
    cd /var/lib/scylla
    . /etc/default/scylla-server
    export OPTS_FILE NETWORK_MODE TAP BRIDGE ETHDRV ETHPCIID NR_HUGEPAGES USER GROUP SCYLLA_ARGS
    exec /usr/lib/scylla/scylla_run
end script

post-stop script
    cd /var/lib/scylla
    . /etc/default/scylla-server
    export OPTS_FILE NETWORK_MODE TAP BRIDGE ETHDRV ETHPCIID NR_HUGEPAGES USER GROUP SCYLLA_ARGS
    /usr/lib/scylla/scylla_stop
end script
@@ -38,40 +38,36 @@

#include "dht/boot_strapper.hh"
#include "service/storage_service.hh"
#include "dht/range_streamer.hh"
#include "gms/failure_detector.hh"
#include "log.hh"

static logging::logger logger("boot_strapper");

namespace dht {

future<> boot_strapper::bootstrap() {
    // FIXME: Stream data from other nodes
    service::get_local_storage_service().finish_bootstrapping();
    return make_ready_future<>();
#if 0
    if (logger.isDebugEnabled())
        logger.debug("Beginning bootstrap process");
    logger.debug("Beginning bootstrap process: sorted_tokens={}", _token_metadata.sorted_tokens());

    RangeStreamer streamer = new RangeStreamer(tokenMetadata, tokens, address, "Bootstrap");
    streamer.addSourceFilter(new RangeStreamer.FailureDetectorSourceFilter(FailureDetector.instance));

    for (String keyspaceName : Schema.instance.getNonSystemKeyspaces())
    {
        AbstractReplicationStrategy strategy = Keyspace.open(keyspaceName).getReplicationStrategy();
        streamer.addRanges(keyspaceName, strategy.getPendingAddressRanges(tokenMetadata, tokens, address));
    auto streamer = make_lw_shared<range_streamer>(_db, _token_metadata, _tokens, _address, "Bootstrap");
    streamer->add_source_filter(std::make_unique<range_streamer::failure_detector_source_filter>(gms::get_local_failure_detector()));
    for (const auto& keyspace_name : _db.local().get_non_system_keyspaces()) {
        auto& ks = _db.local().find_keyspace(keyspace_name);
        auto& strategy = ks.get_replication_strategy();
        std::vector<range<token>> ranges = strategy.get_pending_address_ranges(_token_metadata, _tokens, _address);
        logger.debug("Will stream keyspace={}, ranges={}", keyspace_name, ranges);
        streamer->add_ranges(keyspace_name, ranges);
    }

    try
    {
        streamer.fetchAsync().get();
        StorageService.instance.finishBootstrapping();
    }
    catch (InterruptedException e)
    {
        throw new RuntimeException("Interrupted while waiting on boostrap to complete. Bootstrap will have to be restarted.");
    }
    catch (ExecutionException e)
    {
        throw new RuntimeException("Error during boostrap: " + e.getCause().getMessage(), e.getCause());
    }
#endif
    return streamer->fetch_async().then_wrapped([streamer] (auto&& f) {
        try {
            auto state = f.get0();
        } catch (...) {
            throw std::runtime_error(sprint("Error during bootstrap: %s", std::current_exception()));
        }
        service::get_local_storage_service().finish_bootstrapping();
        return make_ready_future<>();
    });
}

std::unordered_set<token> boot_strapper::get_bootstrap_tokens(token_metadata metadata, database& db) {
@@ -100,7 +96,9 @@ std::unordered_set<token> boot_strapper::get_bootstrap_tokens(token_metadata met
    // if (numTokens == 1)
    //     logger.warn("Picking random token for a single vnode. You should probably add more vnodes; failing that, you should probably specify the token manually");

-   return get_random_tokens(metadata, num_tokens);
+   auto tokens = get_random_tokens(metadata, num_tokens);
+   logger.debug("Get bootstrap_tokens={}", tokens);
+   return tokens;
}

std::unordered_set<token> boot_strapper::get_random_tokens(token_metadata metadata, size_t num_tokens) {
@@ -49,14 +49,16 @@ class boot_strapper {
    using inet_address = gms::inet_address;
    using token_metadata = locator::token_metadata;
    using token = dht::token;
+   distributed<database>& _db;
    /* endpoint that needs to be bootstrapped */
    inet_address _address;
    /* token of the node being bootstrapped. */
    std::unordered_set<token> _tokens;
    token_metadata _token_metadata;
public:
-   boot_strapper(inet_address addr, std::unordered_set<token> tokens, token_metadata tmd)
-       : _address(addr)
+   boot_strapper(distributed<database>& db, inet_address addr, std::unordered_set<token> tokens, token_metadata tmd)
+       : _db(db)
+       , _address(addr)
        , _tokens(tokens)
        , _token_metadata(tmd) {
    }
@@ -44,11 +44,19 @@ token byte_ordered_partitioner::midpoint(const token& t1, const token& t2) const

unsigned
byte_ordered_partitioner::shard_of(const token& t) const {
-    if (t._data.empty()) {
-        return 0;
+    switch (t._kind) {
+    case token::kind::before_all_keys:
+        return 0;
+    case token::kind::after_all_keys:
+        return smp::count - 1;
+    case token::kind::key:
+        if (t._data.empty()) {
+            return 0;
+        }
+        // treat first byte as a fraction in the range [0, 1) and divide it evenly:
+        return (uint8_t(t._data[0]) * smp::count) >> 8;
    }
-    // treat first byte as a fraction in the range [0, 1) and divide it evenly:
-    return (uint8_t(t._data[0]) * smp::count) >> 8;
+    assert(0);
}

using registry = class_registrator<i_partitioner, byte_ordered_partitioner>;
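In the key case above, the first byte read as a fraction of 256 picks the shard. A standalone worked sketch of that arithmetic (smp::count replaced by a plain parameter for illustration):

#include <cstdint>
#include <cassert>

static unsigned shard_from_first_byte(uint8_t first, unsigned shard_count) {
    // first/256 is the token's position in [0, 1); scale it by the shard count.
    return (unsigned(first) * shard_count) >> 8;
}

int main() {
    assert(shard_from_first_byte(0x00, 8) == 0); // lowest keys land on shard 0
    assert(shard_from_first_byte(0x80, 8) == 4); // the midpoint lands on the middle shard
    assert(shard_from_first_byte(0xff, 8) == 7); // highest keys land on the last shard
}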
@@ -161,12 +161,7 @@ std::ostream& operator<<(std::ostream& out, const token& t) {
    } else if (t._kind == token::kind::before_all_keys) {
        out << "minimum token";
    } else {
-       auto flags = out.flags();
-       for (auto c : t._data) {
-           unsigned char x = c;
-           out << std::hex << std::setw(2) << std::setfill('0') << +x << " ";
-       }
-       out.flags(flags);
+       out << global_partitioner().to_sstring(t);
    }
    return out;
}
@@ -297,6 +292,10 @@ int ring_position_comparator::operator()(const ring_position& lh, const ring_pos
    return lh.tri_compare(s, rh);
}

+int token_comparator::operator()(const token& t1, const token& t2) const {
+    return tri_compare(t1, t2);
+}
+
void token::serialize(bytes::iterator& out) const {
    uint8_t kind = _kind == dht::token::kind::before_all_keys ? 0 :
                   _kind == dht::token::kind::key ? 1 : 2;
@@ -159,6 +159,8 @@ public:
    }
};

+using decorated_key_opt = std::experimental::optional<decorated_key>;
+
class i_partitioner {
public:
    virtual ~i_partitioner() {}
@@ -390,6 +392,11 @@ struct ring_position_comparator {
    int operator()(const ring_position& lh, const ring_position& rh) const;
};

+struct token_comparator {
+    // Return values are those of a trichotomic comparison.
+    int operator()(const token& t1, const token& t2) const;
+};
+
std::ostream& operator<<(std::ostream& out, const token& t);

std::ostream& operator<<(std::ostream& out, const decorated_key& t);
@@ -178,11 +178,19 @@ murmur3_partitioner::get_token_validator() {

unsigned
murmur3_partitioner::shard_of(const token& t) const {
-    int64_t l = long_token(t);
-    // treat l as a fraction between 0 and 1 and use 128-bit arithmetic to
-    // divide that range evenly among shards:
-    uint64_t adjusted = uint64_t(l) + uint64_t(std::numeric_limits<int64_t>::min());
-    return (__int128(adjusted) * smp::count) >> 64;
+    switch (t._kind) {
+    case token::kind::before_all_keys:
+        return 0;
+    case token::kind::after_all_keys:
+        return smp::count - 1;
+    case token::kind::key:
+        int64_t l = long_token(t);
+        // treat l as a fraction between 0 and 1 and use 128-bit arithmetic to
+        // divide that range evenly among shards:
+        uint64_t adjusted = uint64_t(l) + uint64_t(std::numeric_limits<int64_t>::min());
+        return (__int128(adjusted) * smp::count) >> 64;
+    }
+    assert(0);
}

using registry = class_registrator<i_partitioner, murmur3_partitioner>;
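The murmur3 variant does the same mapping over the full signed 64-bit token range: adding uint64_t(INT64_MIN) with wraparound shifts [-2^63, 2^63) onto [0, 2^64), and keeping the high 64 bits of the 128-bit product computes floor(adjusted * count / 2^64). A worked sketch (__int128 is the same GCC/Clang extension the hunk itself uses):

#include <cstdint>
#include <limits>
#include <cassert>

static unsigned shard_of_token(int64_t token_value, unsigned shard_count) {
    // Wraparound addition shifts the signed range onto [0, 2^64).
    uint64_t adjusted = uint64_t(token_value) + uint64_t(std::numeric_limits<int64_t>::min());
    // High 64 bits of the 128-bit product == floor(adjusted * count / 2^64).
    return unsigned((__int128(adjusted) * shard_count) >> 64);
}

int main() {
    assert(shard_of_token(std::numeric_limits<int64_t>::min(), 8) == 0); // smallest token -> shard 0
    assert(shard_of_token(0, 8) == 4);                                   // mid-range token -> middle shard
    assert(shard_of_token(std::numeric_limits<int64_t>::max(), 8) == 7); // largest token -> last shard
}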
271
dht/range_streamer.cc
Normal file
@@ -0,0 +1,271 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Modified by Cloudius Systems
 * Copyright 2015 Cloudius Systems
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "dht/range_streamer.hh"
#include "utils/fb_utilities.hh"
#include "locator/snitch_base.hh"
#include "database.hh"
#include "gms/gossiper.hh"
#include "log.hh"
#include "streaming/stream_plan.hh"
#include "streaming/stream_state.hh"

namespace dht {

logging::logger logger("range_streamer");

using inet_address = gms::inet_address;

static std::unordered_map<range<token>, std::unordered_set<inet_address>>
unordered_multimap_to_unordered_map(const std::unordered_multimap<range<token>, inet_address>& multimap) {
    std::unordered_map<range<token>, std::unordered_set<inet_address>> ret;
    for (auto x : multimap) {
        auto& range_token = x.first;
        auto& ep = x.second;
        auto it = ret.find(range_token);
        if (it != ret.end()) {
            it->second.emplace(ep);
        } else {
            ret.emplace(range_token, std::unordered_set<inet_address>{ep});
        }
    }
    return ret;
}

std::unordered_multimap<inet_address, range<token>>
range_streamer::get_range_fetch_map(const std::unordered_multimap<range<token>, inet_address>& ranges_with_sources,
                                    const std::unordered_set<std::unique_ptr<i_source_filter>>& source_filters,
                                    const sstring& keyspace) {
    std::unordered_multimap<inet_address, range<token>> range_fetch_map_map;
    for (auto x : unordered_multimap_to_unordered_map(ranges_with_sources)) {
        const range<token>& range_ = x.first;
        const std::unordered_set<inet_address>& addresses = x.second;
        bool found_source = false;
        for (auto address : addresses) {
            if (address == utils::fb_utilities::get_broadcast_address()) {
                // If localhost is a source, we have found one, but we don't add it to the map to avoid streaming locally
                found_source = true;
                continue;
            }

            auto filtered = false;
            for (const auto& filter : source_filters) {
                if (!filter->should_include(address)) {
                    filtered = true;
                    break;
                }
            }

            if (filtered) {
                continue;
            }

            range_fetch_map_map.emplace(address, range_);
            found_source = true;
            break; // ensure we only stream from one other node for each range
        }

        if (!found_source) {
            throw std::runtime_error(sprint("unable to find sufficient sources for streaming range %s in keyspace %s", range_, keyspace));
        }
    }

    return range_fetch_map_map;
}

std::unordered_multimap<range<token>, inet_address>
range_streamer::get_all_ranges_with_sources_for(const sstring& keyspace_name, std::vector<range<token>> desired_ranges) {
    logger.debug("{} ks={}", __func__, keyspace_name);

    auto& ks = _db.local().find_keyspace(keyspace_name);
    auto& strat = ks.get_replication_strategy();

    // std::unordered_multimap<range<token>, inet_address>
    auto tm = _metadata.clone_only_token_map();
    auto range_addresses = unordered_multimap_to_unordered_map(strat.get_range_addresses(tm));

    std::unordered_multimap<range<token>, inet_address> range_sources;
    auto& snitch = locator::i_endpoint_snitch::get_local_snitch_ptr();
    for (auto& desired_range : desired_ranges) {
        auto found = false;
        for (auto& x : range_addresses) {
            const range<token>& src_range = x.first;
            if (src_range.contains(desired_range, dht::tri_compare)) {
                std::unordered_set<inet_address>& addresses = x.second;
                auto preferred = snitch->get_sorted_list_by_proximity(_address, addresses);
                for (inet_address& p : preferred) {
                    range_sources.emplace(desired_range, p);
                }
                found = true;
            }
        }

        if (!found) {
            throw std::runtime_error(sprint("No sources found for %s", desired_range));
        }
    }

    return range_sources;
}

std::unordered_multimap<range<token>, inet_address>
range_streamer::get_all_ranges_with_strict_sources_for(const sstring& keyspace_name, std::vector<range<token>> desired_ranges) {
    logger.debug("{} ks={}", __func__, keyspace_name);
    assert (_tokens.empty() == false);

    auto& ks = _db.local().find_keyspace(keyspace_name);
    auto& strat = ks.get_replication_strategy();

    //Active ranges
    auto metadata_clone = _metadata.clone_only_token_map();
    auto range_addresses = unordered_multimap_to_unordered_map(strat.get_range_addresses(metadata_clone));

    //Pending ranges
    metadata_clone.update_normal_tokens(_tokens, _address);
    auto pending_range_addresses = unordered_multimap_to_unordered_map(strat.get_range_addresses(metadata_clone));

    //Collects the source that will have its range moved to the new node
    std::unordered_multimap<range<token>, inet_address> range_sources;

    for (auto& desired_range : desired_ranges) {
        for (auto& x : range_addresses) {
            const range<token>& src_range = x.first;
            if (src_range.contains(desired_range, dht::tri_compare)) {
                auto old_endpoints = x.second;
                auto it = pending_range_addresses.find(desired_range);
                assert (it != pending_range_addresses.end());
                auto new_endpoints = it->second;

                //Due to CASSANDRA-5953 we can have a higher RF than we have endpoints.
                //So we need to be careful to only be strict when endpoints == RF
                if (old_endpoints.size() == strat.get_replication_factor()) {
                    std::unordered_set<inet_address> diff;
                    std::set_difference(old_endpoints.begin(), old_endpoints.end(),
                        new_endpoints.begin(), new_endpoints.end(), std::inserter(diff, diff.begin()));
                    old_endpoints = std::move(diff);
                    if (old_endpoints.size() != 1) {
                        throw std::runtime_error(sprint("Expected 1 endpoint but found ", old_endpoints.size()));
                    }
                }
                range_sources.emplace(desired_range, *(old_endpoints.begin()));
            }
        }

        //Validate
        auto nr = range_sources.count(desired_range);
        if (nr < 1) {
            throw std::runtime_error(sprint("No sources found for %s", desired_range));
        }

        if (nr > 1) {
            throw std::runtime_error(sprint("Multiple endpoints found for %s", desired_range));
        }

        inet_address source_ip = range_sources.find(desired_range)->second;
        auto& gossiper = gms::get_local_gossiper();
        auto source_state = gossiper.get_endpoint_state_for_endpoint(source_ip);
        if (gossiper.is_enabled() && source_state && !source_state->is_alive()) {
            throw std::runtime_error(sprint("A node required to move the data consistently is down (%s). If you wish to move the data from a potentially inconsistent replica, restart the node with -Dcassandra.consistent.rangemovement=false", source_ip));
        }
    }

    return range_sources;
}

bool range_streamer::use_strict_sources_for_ranges(const sstring& keyspace_name) {
    auto& ks = _db.local().find_keyspace(keyspace_name);
    auto& strat = ks.get_replication_strategy();
    // FIXME: DatabaseDescriptor.isReplacing()
    auto is_replacing = false;
    return !is_replacing
        && use_strict_consistency()
        && !_tokens.empty()
        && _metadata.get_all_endpoints().size() != strat.get_replication_factor();
}

void range_streamer::add_ranges(const sstring& keyspace_name, std::vector<range<token>> ranges) {
    auto ranges_for_keyspace = use_strict_sources_for_ranges(keyspace_name)
        ? get_all_ranges_with_strict_sources_for(keyspace_name, ranges)
        : get_all_ranges_with_sources_for(keyspace_name, ranges);

    if (logger.is_enabled(logging::log_level::debug)) {
        for (auto& x : ranges_for_keyspace) {
            logger.debug("{} : range {} exists on {}", _description, x.first, x.second);
        }
    }

    // TODO: share code with unordered_multimap_to_unordered_map
    std::unordered_map<inet_address, std::vector<range<token>>> tmp;
    for (auto& x : get_range_fetch_map(ranges_for_keyspace, _source_filters, keyspace_name)) {
        auto& addr = x.first;
        auto& range_ = x.second;
        auto it = tmp.find(addr);
        if (it != tmp.end()) {
            it->second.push_back(range_);
        } else {
            tmp.emplace(addr, std::vector<range<token>>{range_});
        }
    }

    if (logger.is_enabled(logging::log_level::debug)) {
        for (auto& x : tmp) {
            logger.debug("{} : range {} from source {} for keyspace {}", _description, x.second, x.first, keyspace_name);
        }
    }
    _to_fetch.emplace(keyspace_name, std::move(tmp));
}

future<streaming::stream_state> range_streamer::fetch_async() {
    for (auto& fetch : _to_fetch) {
        const auto& keyspace = fetch.first;
        for (auto& x : fetch.second) {
            auto& source = x.first;
            auto& ranges = x.second;
            auto preferred = net::get_local_messaging_service().get_preferred_ip(source);
            /* Send messages to respective folks to stream data over to me */
            if (logger.is_enabled(logging::log_level::debug)) {
                logger.debug("{}ing from {} ranges {}", _description, source, ranges);
            }
            _stream_plan.request_ranges(source, preferred, keyspace, ranges);
        }
    }

    return _stream_plan.execute();
}

} // dht
176
dht/range_streamer.hh
Normal file
@@ -0,0 +1,176 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Modified by Cloudius Systems
 * Copyright 2015 Cloudius Systems
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include "locator/token_metadata.hh"
#include "locator/snitch_base.hh"
#include "streaming/stream_plan.hh"
#include "streaming/stream_state.hh"
#include "gms/inet_address.hh"
#include "gms/i_failure_detector.hh"
#include "range.hh"
#include <seastar/core/distributed.hh>
#include <unordered_map>
#include <memory>

class database;

namespace dht {
/**
 * Assists in streaming ranges to a node.
 */
class range_streamer {
public:
    using inet_address = gms::inet_address;
    using token_metadata = locator::token_metadata;
    using stream_plan = streaming::stream_plan;
    using stream_state = streaming::stream_state;
    using i_failure_detector = gms::i_failure_detector;
    static bool use_strict_consistency() {
        //FIXME: Boolean.parseBoolean(System.getProperty("cassandra.consistent.rangemovement","true"));
        return true;
    }
public:
    /**
     * A filter applied to sources to stream from when constructing a fetch map.
     */
    class i_source_filter {
    public:
        virtual bool should_include(inet_address endpoint) = 0;
    };

    /**
     * Source filter which excludes any endpoints that are not alive according to a
     * failure detector.
     */
    class failure_detector_source_filter : public i_source_filter {
    private:
        gms::i_failure_detector& _fd;
    public:
        failure_detector_source_filter(i_failure_detector& fd) : _fd(fd) { }
        virtual bool should_include(inet_address endpoint) override { return _fd.is_alive(endpoint); }
    };

    /**
     * Source filter which excludes any endpoints that are not in a specific data center.
     */
    class single_datacenter_filter : public i_source_filter {
    private:
        sstring _source_dc;
    public:
        single_datacenter_filter(const sstring& source_dc)
            : _source_dc(source_dc) {
        }
        virtual bool should_include(inet_address endpoint) override {
            auto& snitch_ptr = locator::i_endpoint_snitch::get_local_snitch_ptr();
            return snitch_ptr->get_datacenter(endpoint) == _source_dc;
        }
    };

    range_streamer(distributed<database>& db, token_metadata& tm, std::unordered_set<token> tokens, inet_address address, sstring description)
        : _db(db)
        , _metadata(tm)
        , _tokens(std::move(tokens))
        , _address(address)
        , _description(std::move(description))
        , _stream_plan(_description, true) {
    }

    range_streamer(distributed<database>& db, token_metadata& tm, inet_address address, sstring description)
        : range_streamer(db, tm, std::unordered_set<token>(), address, description) {
    }

    void add_source_filter(std::unique_ptr<i_source_filter> filter) {
        _source_filters.emplace(std::move(filter));
    }

    void add_ranges(const sstring& keyspace_name, std::vector<range<token>> ranges);
private:
    bool use_strict_sources_for_ranges(const sstring& keyspace_name);
    /**
     * Get a map of all ranges and their respective sources that are candidates for streaming the given ranges
     * to us. For each range, the list of sources is sorted by proximity relative to the given destAddress.
     */
    std::unordered_multimap<range<token>, inet_address>
    get_all_ranges_with_sources_for(const sstring& keyspace_name, std::vector<range<token>> desired_ranges);
    /**
     * Get a map of all ranges and the source that will be cleaned up once this bootstrapped node is added for the given ranges.
     * For each range, the list should only contain a single source. This allows us to consistently migrate data without violating
     * consistency.
     */
    std::unordered_multimap<range<token>, inet_address>
    get_all_ranges_with_strict_sources_for(const sstring& keyspace_name, std::vector<range<token>> desired_ranges);
private:
    /**
     * @param rangesWithSources The ranges we want to fetch (key) and their potential sources (value)
     * @param sourceFilters     A (possibly empty) collection of source filters to apply. In addition to any filters given
     *                          here, we always exclude ourselves.
     * @return
     */
    static std::unordered_multimap<inet_address, range<token>>
    get_range_fetch_map(const std::unordered_multimap<range<token>, inet_address>& ranges_with_sources,
                        const std::unordered_set<std::unique_ptr<i_source_filter>>& source_filters,
                        const sstring& keyspace);

#if 0
    public static Multimap<InetAddress, Range<Token>> getWorkMap(Multimap<Range<Token>, InetAddress> rangesWithSourceTarget, String keyspace)
    {
        return getRangeFetchMap(rangesWithSourceTarget, Collections.<ISourceFilter>singleton(new FailureDetectorSourceFilter(FailureDetector.instance)), keyspace);
    }

    // For testing purposes
    Multimap<String, Map.Entry<InetAddress, Collection<Range<Token>>>> toFetch()
    {
        return toFetch;
    }
#endif
public:
    future<streaming::stream_state> fetch_async();
private:
    distributed<database>& _db;
    token_metadata& _metadata;
    std::unordered_set<token> _tokens;
    inet_address _address;
    sstring _description;
    std::unordered_multimap<sstring, std::unordered_map<inet_address, std::vector<range<token>>>> _to_fetch;
    std::unordered_set<std::unique_ptr<i_source_filter>> _source_filters;
    stream_plan _stream_plan;
};

} // dht
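The two filters above show the intended shape of i_source_filter; a hedged sketch of a third one in the same mold (hypothetical, not part of this diff), installed like the failure-detector filter in boot_strapper::bootstrap() via add_source_filter():

// Only accept streaming sources from a fixed allow-list of endpoints.
class allow_list_filter : public dht::range_streamer::i_source_filter {
    std::unordered_set<gms::inet_address> _allowed;
public:
    explicit allow_list_filter(std::unordered_set<gms::inet_address> allowed)
        : _allowed(std::move(allowed)) {
    }
    virtual bool should_include(gms::inet_address endpoint) override {
        return _allowed.count(endpoint) != 0;
    }
};

// streamer->add_source_filter(std::make_unique<allow_list_filter>(my_seeds));
// (my_seeds is a hypothetical set of allowed addresses.)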
37
dist/ami/build_ami.sh
vendored
@@ -1,9 +1,7 @@
#!/bin/sh -e

-if [ ! -e dist/ami/build_ami.sh ] || [ ! -e ../scylla-jmx/dist/redhat/build_rpm.sh ] || [ ! -e ../cassandra/dist/redhat/build_rpm.sh ]; then
+if [ ! -e dist/ami/build_ami.sh ]; then
    echo "run build_ami.sh in top of scylla dir"
-   echo "please make sure scylla-jmx is checked out under the same directory as scylla"
-   echo "please make sure cassandra with scylla tools branch checked out under the same directory as scylla"
    exit 1
fi

@@ -14,39 +12,6 @@ if [ ! -f variables.json ]; then
    exit 1
fi

-if [ ! -f files/scylla-server.rpm ] || [ ! -f files/scylla-server-debuginfo.rpm ]; then
-    cd ../../
-    dist/redhat/build_rpm.sh
-    SCYLLA_VERSION=$(cat build/SCYLLA-VERSION-FILE)
-    SCYLLA_RELEASE=$(cat build/SCYLLA-RELEASE-FILE)
-    RPM=`ls build/rpms/scylla-server-$SCYLLA_VERSION-$SCYLLA_RELEASE*.x86_64.rpm|grep -v debuginfo`
-    cp $RPM dist/ami/files/scylla-server.rpm
-    cp build/rpms/scylla-server-debuginfo-$SCYLLA_VERSION-$SCYLLA_RELEASE*.x86_64.rpm dist/ami/files/scylla-server-debuginfo.rpm
-    cd -
-fi
-
-if [ ! -f files/scylla-jmx.rpm ]; then
-    CWD=`pwd`
-    cd ../../../scylla-jmx
-    dist/redhat/build_rpm.sh
-    SCYLLA_VERSION=$(cat build/SCYLLA-VERSION-FILE)
-    SCYLLA_RELEASE=$(cat build/SCYLLA-RELEASE-FILE)
-    RPM=`ls build/rpms/scylla-jmx-$SCYLLA_VERSION-$SCYLLA_RELEASE*.noarch.rpm`
-    cp $RPM $CWD/files/scylla-jmx.rpm
-    cd -
-fi
-
-if [ ! -f files/scylla-tools.rpm ]; then
-    CWD=`pwd`
-    cd ../../../cassandra
-    dist/redhat/build_rpm.sh
-    SCYLLA_VERSION=$(cat build/SCYLLA-VERSION-FILE)
-    SCYLLA_RELEASE=$(cat build/SCYLLA-RELEASE-FILE)
-    RPM=`ls build/rpms/scylla-tools-$SCYLLA_VERSION-$SCYLLA_RELEASE*.noarch.rpm`
-    cp $RPM $CWD/files/scylla-tools.rpm
-    cd -
-fi
-
if [ ! -d packer ]; then
    wget https://dl.bintray.com/mitchellh/packer/packer_0.8.6_linux_amd64.zip
    mkdir packer
2
dist/ami/files/scylla-ami
vendored
Submodule dist/ami/files/scylla-ami updated: c6ddbea9a0...3f371840c9
42
dist/ami/files/scylla-setup.sh
vendored
@@ -1,27 +1,49 @@
#!/bin/sh -e

-if [ -f /dev/md0 ]; then
+if [ -b /dev/md0 ]; then
    echo "RAID already constructed."
    exit 1
fi
-mdadm --create --verbose --force --run /dev/md0 --level=0 -c256 --raid-devices=2 /dev/xvdb /dev/xvdc
-blockdev --setra 65536 /dev/md0
-mkfs.xfs /dev/md0 -f
-echo "DEVICE /dev/xvdb /dev/xvdc" > /etc/mdadm.conf
-mdadm --detail --scan >> /etc/mdadm.conf
-UUID=`blkid /dev/md0 | awk '{print $2}'`
-mkdir /data
-echo "$UUID /data xfs noatime 0 0" >> /etc/fstab
-mount /data

+dnf update -y
+
+DISKS=""
+NR=0
+for i in xvd{b..z}; do
+    if [ -b /dev/$i ];then
+        echo Found disk /dev/$i
+        DISKS="$DISKS /dev/$i"
+        NR=$((NR+1))
+    fi
+done
+
+echo Creating RAID0 for scylla using $NR disk\(s\): $DISKS
+
+if [ $NR -ge 1 ]; then
+    mdadm --create --verbose --force --run /dev/md0 --level=0 -c256 --raid-devices=$NR $DISKS
+    blockdev --setra 65536 /dev/md0
+    mkfs.xfs /dev/md0 -f
+    echo "DEVICE $DISKS" > /etc/mdadm.conf
+    mdadm --detail --scan >> /etc/mdadm.conf
+    UUID=`blkid /dev/md0 | awk '{print $2}'`
+    mkdir /data
+    echo "$UUID /data xfs noatime 0 0" >> /etc/fstab
+    mount /data
+else
+    echo "WARN: Scylla is not using XFS to store data. Performance will suffer." > /home/fedora/WARN_PLEASE_READ.TXT
+fi

mkdir -p /data/data
mkdir -p /data/commitlog
chown scylla:scylla /data/*

CPU_NR=`cat /proc/cpuinfo |grep processor|wc -l`
if [ $CPU_NR -ge 8 ]; then
    NR=$((CPU_NR - 1))
    echo SCYLLA_ARGS=\"--cpuset 1-$NR --smp $NR\" >> /etc/sysconfig/scylla-server
    echo SET_NIC=\"yes\" >> /etc/sysconfig/scylla-server
fi

/usr/lib/scylla/scylla-ami/ds2_configure.py
systemctl disable scylla-setup.service
systemctl enable scylla-server.service
11
dist/ami/files/scylla.repo
vendored
Normal file
@@ -0,0 +1,11 @@
[scylla]
name=Scylla for Fedora $releasever - $basearch
baseurl=https://s3.amazonaws.com/downloads.scylladb.com/rpm/fedora/$releasever/$basearch/
enabled=1
gpgcheck=0

[scylla-generic]
name=Scylla for Fedora $releasever
baseurl=https://s3.amazonaws.com/downloads.scylladb.com/rpm/fedora/$releasever/noarch/
enabled=1
gpgcheck=0
20
dist/ami/files/setup-ami.sh
vendored
Executable file
@@ -0,0 +1,20 @@
#!/bin/sh -e

setenforce 0
sed -e "s/enforcing/disabled/" /etc/sysconfig/selinux > /tmp/selinux
mv /tmp/selinux /etc/sysconfig/
dnf update -y
mv /home/fedora/scylla.repo /etc/yum.repos.d/
dnf install -y scylla-server scylla-server-debuginfo scylla-jmx scylla-tools
dnf install -y mdadm xfsprogs
cp /home/fedora/coredump.conf /etc/systemd/coredump.conf
mv /home/fedora/scylla-setup.service /usr/lib/systemd/system
mv /home/fedora/scylla-setup.sh /usr/lib/scylla
chmod a+rx /usr/lib/scylla/scylla-setup.sh
mv /home/fedora/scylla-ami /usr/lib/scylla/scylla-ami
chmod a+rx /usr/lib/scylla/scylla-ami/ds2_configure.py
systemctl enable scylla-setup.service
sed -e 's!/var/lib/scylla/data!/data/data!' -e 's!commitlog_directory: /var/lib/scylla/commitlog!commitlog_directory: /data/commitlog!' /var/lib/scylla/conf/scylla.yaml > /tmp/scylla.yaml
mv /tmp/scylla.yaml /var/lib/scylla/conf
grep -v ' - mounts' /etc/cloud/cloud.cfg > /tmp/cloud.cfg
mv /tmp/cloud.cfg /etc/cloud/cloud.cfg
15
dist/ami/scylla.json
vendored
@@ -24,20 +24,7 @@
{
    "type": "shell",
    "inline": [
-       "sudo dnf update -y",
-       "sudo dnf install -y /home/fedora/*.rpm",
-       "sudo dnf install -y mdadm xfsprogs",
-       "sudo cp /home/fedora/coredump.conf /etc/systemd/coredump.conf",
-       "sudo mv /home/fedora/scylla-setup.service /usr/lib/systemd/system",
-       "sudo mv /home/fedora/scylla-setup.sh /usr/lib/scylla",
-       "sudo chmod a+rx /usr/lib/scylla/scylla-setup.sh",
-       "sudo mv /home/fedora/scylla-ami /usr/lib/scylla/scylla-ami",
-       "sudo chmod a+rx /usr/lib/scylla/scylla-ami/ds2_configure.py",
-       "sudo systemctl enable scylla-setup.service",
-       "sudo sed -e 's!/var/lib/scylla/data!/data/data!' -e 's!commitlog_directory: /var/lib/scylla/commitlog!commitlog_directory: /data/commitlog!' /var/lib/scylla/conf/scylla.yaml > /tmp/scylla.yaml",
-       "sudo mv /tmp/scylla.yaml /var/lib/scylla/conf",
-       "grep -v ' - mounts' /etc/cloud/cloud.cfg > /tmp/cloud.cfg",
-       "sudo mv /tmp/cloud.cfg /etc/cloud/cloud.cfg"
+       "sudo sh -x -e /home/fedora/setup-ami.sh"
    ]
}
],
20
dist/common/scripts/scylla_prepare
vendored
Executable file
@@ -0,0 +1,20 @@
#!/bin/sh -e

if [ "$NETWORK_MODE" = "virtio" ]; then
    ip tuntap del mode tap dev $TAP
    ip tuntap add mode tap dev $TAP user $USER one_queue vnet_hdr
    ip link set dev $TAP up
    ip link set dev $TAP master $BRIDGE
    chown $USER.$GROUP /dev/vhost-net
elif [ "$NETWORK_MODE" = "dpdk" ]; then
    modprobe uio
    modprobe uio_pci_generic
    /usr/lib/scylla/dpdk_nic_bind.py --force --bind=uio_pci_generic $ETHPCIID
    for n in /sys/devices/system/node/node?; do
        echo $NR_HUGEPAGES > $n/hugepages/hugepages-2048kB/nr_hugepages
    done
fi
. /etc/os-release
if [ "$NAME" = "Ubuntu" ]; then
    hugeadm --create-mounts
fi
@@ -14,4 +14,4 @@ if [ "$SET_NIC" == "yes" ]; then
    sudo sh /usr/lib/scylla/posix_net_conf.sh >/dev/null 2>&1 || true
fi

-exec sudo -u $USER env HOME=/var/lib/scylla /usr/bin/scylla $args
+exec sudo -u $USER env HOME=/var/lib/scylla SCYLLA_HOME=/var/lib/scylla /usr/bin/scylla $args
8
dist/common/scripts/scylla_stop
vendored
Executable file
@@ -0,0 +1,8 @@
#!/bin/sh -e

if [ "$NETWORK_MODE" = "virtio" ]; then
    ip tuntap del mode tap dev $TAP
elif [ "$NETWORK_MODE" = "dpdk" ]; then
    /usr/lib/scylla/dpdk_nic_bind.py -u $ETHPCIID
    /usr/lib/scylla/dpdk_nic_bind.py -b $ETHDRV $ETHPCIID
fi
1
dist/redhat/build_rpm.sh
vendored
@@ -37,6 +37,7 @@ if [ "$OS" = "Fedora" ]; then
    rpmbuild -bs --define "_topdir $RPMBUILD" $RPMBUILD/SPECS/scylla-server.spec
    mock rebuild --resultdir=`pwd`/build/rpms $RPMBUILD/SRPMS/scylla-server-$VERSION*.src.rpm
else
+   . /etc/profile.d/scylla.sh
    sudo yum-builddep -y $RPMBUILD/SPECS/scylla-server.spec
    rpmbuild -ba --define "_topdir $RPMBUILD" $RPMBUILD/SPECS/scylla-server.spec
fi
32
dist/redhat/centos_dep/antlr3
vendored
@@ -1,33 +1,3 @@
#!/bin/sh
-#
-# antlr3 script
-# JPackage Project <http://www.jpackage.org/>
-
-# Source functions library
-_prefer_jre="true"
-. /usr/share/java-utils/java-functions
-
-# Source system prefs
-if [ -f /etc/java/antlr3.conf ] ; then
-    . /etc/java/antlr3.conf
-fi
-
-# Source user prefs
-if [ -f $HOME/.antlr3rc ] ; then
-    . $HOME/.antlr3rc
-fi
-
-# Configuration
-MAIN_CLASS=org.antlr.Tool
-BASE_FLAGS=''
-BASE_OPTIONS=''
-BASE_JARS="antlr3.jar"
-
-# Set parameters
-set_jvm
-set_classpath $BASE_JARS
-set_flags $BASE_FLAGS
-set_options $BASE_OPTIONS
-
-# Let's start
-run "$@"
+exec /usr/bin/java -jar /opt/scylladb/lib/java/antlr3.jar $*
81
dist/redhat/centos_dep/binutils.diff
vendored
Normal file
@@ -0,0 +1,81 @@
--- binutils.spec	2015-10-19 05:45:55.106745163 +0000
+++ binutils.spec.1	2015-10-19 05:45:55.807742899 +0000
@@ -17,7 +17,7 @@
 %define enable_deterministic_archives 1

 Summary: A GNU collection of binary utilities
-Name: %{?cross}binutils%{?_with_debug:-debug}
+Name: scylla-%{?cross}binutils%{?_with_debug:-debug}
 Version: 2.25
 Release: 5%{?dist}
 License: GPLv3+
@@ -29,6 +29,7 @@
 # instead.

 Source: http://ftp.gnu.org/gnu/binutils/binutils-%{version}.tar.bz2
+%define _prefix /opt/scylladb

 Source2: binutils-2.19.50.0.1-output-format.sed
 Patch01: binutils-2.20.51.0.2-libtool-lib64.patch
@@ -82,6 +83,9 @@
 BuildRequires: texinfo >= 4.0, gettext, flex, bison, zlib-devel
 # BZ 920545: We need pod2man in order to build the manual pages.
 BuildRequires: /usr/bin/pod2man
+
+Requires: scylla-env
+
 # Required for: ld-bootstrap/bootstrap.exp bootstrap with --static
 # It should not be required for: ld-elf/elf.exp static {preinit,init,fini} array
 %if %{run_testsuite}
@@ -105,8 +109,8 @@

 %if "%{build_gold}" == "both"
 Requires(post): coreutils
-Requires(post): %{_sbindir}/alternatives
-Requires(preun): %{_sbindir}/alternatives
+Requires(post): /sbin/alternatives
+Requires(preun): /sbin/alternatives
 %endif

 # On ARM EABI systems, we do want -gnueabi to be part of the
@@ -131,11 +135,12 @@
 %package devel
 Summary: BFD and opcodes static and dynamic libraries and header files
 Group: System Environment/Libraries
-Provides: binutils-static = %{version}-%{release}
+Provides: scylla-binutils-static = %{version}-%{release}
+Requires: scylla-env
 Requires(post): /sbin/install-info
 Requires(preun): /sbin/install-info
 Requires: zlib-devel
-Requires: binutils = %{version}-%{release}
+Requires: scylla-binutils = %{version}-%{release}

 %description devel
 This package contains BFD and opcodes static and dynamic libraries.
@@ -411,11 +416,11 @@
 %post
 %if "%{build_gold}" == "both"
 %__rm -f %{_bindir}/%{?cross}ld
-%{_sbindir}/alternatives --install %{_bindir}/%{?cross}ld %{?cross}ld \
+/sbin/alternatives --install %{_bindir}/%{?cross}ld %{?cross}ld \
 	%{_bindir}/%{?cross}ld.bfd %{ld_bfd_priority}
-%{_sbindir}/alternatives --install %{_bindir}/%{?cross}ld %{?cross}ld \
+/sbin/alternatives --install %{_bindir}/%{?cross}ld %{?cross}ld \
 	%{_bindir}/%{?cross}ld.gold %{ld_gold_priority}
-%{_sbindir}/alternatives --auto %{?cross}ld
+/sbin/alternatives --auto %{?cross}ld
 %endif
 %if %{isnative}
 /sbin/ldconfig
@@ -433,8 +438,8 @@
 %preun
 %if "%{build_gold}" == "both"
 if [ $1 = 0 ]; then
-  %{_sbindir}/alternatives --remove %{?cross}ld %{_bindir}/%{?cross}ld.bfd
-  %{_sbindir}/alternatives --remove %{?cross}ld %{_bindir}/%{?cross}ld.gold
+  /sbin/alternatives --remove %{?cross}ld %{_bindir}/%{?cross}ld.bfd
+  /sbin/alternatives --remove %{?cross}ld %{_bindir}/%{?cross}ld.gold
 fi
 %endif
 %if %{isnative}
523
dist/redhat/centos_dep/boost.diff
vendored
Normal file
@@ -0,0 +1,523 @@
--- boost.spec	2015-05-03 17:32:13.000000000 +0000
+++ boost.spec.1	2015-10-19 06:03:12.670534256 +0000
@@ -6,6 +6,11 @@
 # We should be able to install directly.
 %define boost_docdir __tmp_docdir
 %define boost_examplesdir __tmp_examplesdir
+%define _without_python3 --without-python3
+%define _without_mpich --without-mpich
+%define _without_openmpi --without-openmpi
+%define _without_context --without-context
+%define _prefix /opt/scylladb

 %ifarch ppc64le
 %bcond_with mpich
@@ -28,18 +33,19 @@

 %bcond_without python3

-Name: boost
+Name: scylla-boost
+%define orig_name boost
 Summary: The free peer-reviewed portable C++ source libraries
 Version: 1.57.0
 %define version_enc 1_57_0
 Release: 6%{?dist}
 License: Boost and MIT and Python

-%define toplev_dirname %{name}_%{version_enc}
+%define toplev_dirname %{orig_name}_%{version_enc}
 URL: http://www.boost.org
 Group: System Environment/Libraries

-Source0: http://downloads.sourceforge.net/%{name}/%{toplev_dirname}.tar.bz2
+Source0: http://downloads.sourceforge.net/%{orig_name}/%{toplev_dirname}.tar.bz2
 Source1: ver.py
 Source2: libboost_thread.so

@@ -47,34 +53,36 @@
 # equal to the Boost version (e.g., 1.41.0).
 %define sonamever %{version}

+Requires: scylla-env
+
 # boost is an "umbrella" package that pulls in all other boost
 # components, except for MPI and Python 3 sub-packages. Those are
 # special in that they are rarely necessary, and it's not a big burden
 # to have interested parties install them explicitly.
-Requires: boost-atomic%{?_isa} = %{version}-%{release}
-Requires: boost-chrono%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-atomic%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-chrono%{?_isa} = %{version}-%{release}
 %if %{with context}
-Requires: boost-context%{?_isa} = %{version}-%{release}
-Requires: boost-coroutine%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-context%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-coroutine%{?_isa} = %{version}-%{release}
 %endif
-Requires: boost-date-time%{?_isa} = %{version}-%{release}
-Requires: boost-filesystem%{?_isa} = %{version}-%{release}
-Requires: boost-graph%{?_isa} = %{version}-%{release}
-Requires: boost-iostreams%{?_isa} = %{version}-%{release}
-Requires: boost-locale%{?_isa} = %{version}-%{release}
-Requires: boost-log%{?_isa} = %{version}-%{release}
-Requires: boost-math%{?_isa} = %{version}-%{release}
-Requires: boost-program-options%{?_isa} = %{version}-%{release}
-Requires: boost-python%{?_isa} = %{version}-%{release}
-Requires: boost-random%{?_isa} = %{version}-%{release}
-Requires: boost-regex%{?_isa} = %{version}-%{release}
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
-Requires: boost-signals%{?_isa} = %{version}-%{release}
-Requires: boost-system%{?_isa} = %{version}-%{release}
-Requires: boost-test%{?_isa} = %{version}-%{release}
-Requires: boost-thread%{?_isa} = %{version}-%{release}
-Requires: boost-timer%{?_isa} = %{version}-%{release}
-Requires: boost-wave%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-date-time%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-filesystem%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-graph%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-iostreams%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-locale%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-log%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-math%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-program-options%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-python%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-random%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-regex%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-signals%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-test%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-thread%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-timer%{?_isa} = %{version}-%{release}
+Requires: scylla-boost-wave%{?_isa} = %{version}-%{release}

 BuildRequires: m4
 BuildRequires: libstdc++-devel%{?_isa}
@@ -151,6 +159,7 @@
 %package atomic
 Summary: Run-Time component of boost atomic library
 Group: System Environment/Libraries
+Requires: scylla-env

 %description atomic

@@ -162,7 +171,8 @@
 %package chrono
 Summary: Run-Time component of boost chrono library
 Group: System Environment/Libraries
-Requires: boost-system%{?_isa} = %{version}-%{release}
+Requires: scylla-env
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}

 %description chrono

@@ -171,6 +181,7 @@
 %package container
 Summary: Run-Time component of boost container library
 Group: System Environment/Libraries
+Requires: scylla-env

 %description container

@@ -183,6 +194,7 @@
 %package context
 Summary: Run-Time component of boost context switching library
 Group: System Environment/Libraries
+Requires: scylla-env

 %description context

@@ -192,6 +204,7 @@
 %package coroutine
 Summary: Run-Time component of boost coroutine library
 Group: System Environment/Libraries
+Requires: scylla-env

 %description coroutine
 Run-Time support for Boost.Coroutine, a library that provides
@@ -203,6 +216,7 @@
 %package date-time
 Summary: Run-Time component of boost date-time library
 Group: System Environment/Libraries
+Requires: scylla-env

 %description date-time

@@ -212,7 +226,8 @@
 %package filesystem
 Summary: Run-Time component of boost filesystem library
 Group: System Environment/Libraries
-Requires: boost-system%{?_isa} = %{version}-%{release}
+Requires: scylla-env
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}

 %description filesystem

@@ -223,7 +238,8 @@
 %package graph
 Summary: Run-Time component of boost graph library
 Group: System Environment/Libraries
-Requires: boost-regex%{?_isa} = %{version}-%{release}
+Requires: scylla-env
+Requires: scylla-boost-regex%{?_isa} = %{version}-%{release}

 %description graph

@@ -243,9 +259,10 @@
 %package locale
 Summary: Run-Time component of boost locale library
 Group: System Environment/Libraries
-Requires: boost-chrono%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-system%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-thread%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-chrono%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-thread%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description locale
|
||||
|
||||
@@ -255,6 +272,7 @@
|
||||
%package log
|
||||
Summary: Run-Time component of boost logging library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description log
|
||||
|
||||
@@ -265,6 +283,7 @@
|
||||
%package math
|
||||
Summary: Math functions for boost TR1 library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description math
|
||||
|
||||
@@ -274,6 +293,7 @@
|
||||
%package program-options
|
||||
Summary: Run-Time component of boost program_options library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description program-options
|
||||
|
||||
@@ -284,6 +304,7 @@
|
||||
%package python
|
||||
Summary: Run-Time component of boost python library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description python
|
||||
|
||||
@@ -298,6 +319,7 @@
|
||||
%package python3
|
||||
Summary: Run-Time component of boost python library for Python 3
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description python3
|
||||
|
||||
@@ -310,8 +332,9 @@
|
||||
%package python3-devel
|
||||
Summary: Shared object symbolic links for Boost.Python 3
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-python3%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-devel%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-python3%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-devel%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description python3-devel
|
||||
|
||||
@@ -322,6 +345,7 @@
|
||||
%package random
|
||||
Summary: Run-Time component of boost random library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description random
|
||||
|
||||
@@ -330,6 +354,7 @@
|
||||
%package regex
|
||||
Summary: Run-Time component of boost regular expression library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description regex
|
||||
|
||||
@@ -338,6 +363,7 @@
|
||||
%package serialization
|
||||
Summary: Run-Time component of boost serialization library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description serialization
|
||||
|
||||
@@ -346,6 +372,7 @@
|
||||
%package signals
|
||||
Summary: Run-Time component of boost signals and slots library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description signals
|
||||
|
||||
@@ -354,6 +381,7 @@
|
||||
%package system
|
||||
Summary: Run-Time component of boost system support library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description system
|
||||
|
||||
@@ -364,6 +392,7 @@
|
||||
%package test
|
||||
Summary: Run-Time component of boost test library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
|
||||
%description test
|
||||
|
||||
@@ -373,7 +402,8 @@
|
||||
%package thread
|
||||
Summary: Run-Time component of boost thread library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-system%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description thread
|
||||
|
||||
@@ -385,8 +415,9 @@
|
||||
%package timer
|
||||
Summary: Run-Time component of boost timer library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-chrono%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-system%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-chrono%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description timer
|
||||
|
||||
@@ -397,11 +428,12 @@
|
||||
%package wave
|
||||
Summary: Run-Time component of boost C99/C++ pre-processing library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-chrono%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-date-time%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-filesystem%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-system%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-thread%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-chrono%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-date-time%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-filesystem%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-system%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-thread%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description wave
|
||||
|
||||
@@ -412,27 +444,20 @@
|
||||
%package devel
|
||||
Summary: The Boost C++ headers and shared development libraries
|
||||
Group: Development/Libraries
|
||||
-Requires: boost%{?_isa} = %{version}-%{release}
|
||||
-Provides: boost-python-devel
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-python-devel
|
||||
Requires: libicu-devel%{?_isa}
|
||||
|
||||
-# Odeint was shipped in Fedora 18, but later became part of Boost.
|
||||
-# Note we also obsolete odeint-doc down there.
|
||||
-# https://bugzilla.redhat.com/show_bug.cgi?id=892850
|
||||
-Provides: odeint = 2.2-5
|
||||
-Obsoletes: odeint < 2.2-5
|
||||
-Provides: odeint-devel = 2.2-5
|
||||
-Obsoletes: odeint-devel < 2.2-5
|
||||
-
|
||||
%description devel
|
||||
Headers and shared object symbolic links for the Boost C++ libraries.
|
||||
|
||||
%package static
|
||||
Summary: The Boost C++ static development libraries
|
||||
Group: Development/Libraries
|
||||
-Requires: boost-devel%{?_isa} = %{version}-%{release}
|
||||
-Obsoletes: boost-devel-static < 1.34.1-14
|
||||
-Provides: boost-devel-static = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-devel%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-devel-static = %{version}-%{release}
|
||||
|
||||
%description static
|
||||
Static Boost C++ libraries.
|
||||
@@ -443,11 +468,7 @@
|
||||
%if 0%{?rhel} >= 6
|
||||
BuildArch: noarch
|
||||
%endif
|
||||
-Provides: boost-python-docs = %{version}-%{release}
|
||||
-
|
||||
-# See the description above.
|
||||
-Provides: odeint-doc = 2.2-5
|
||||
-Obsoletes: odeint-doc < 2.2-5
|
||||
+Provides: scylla-boost-python-docs = %{version}-%{release}
|
||||
|
||||
%description doc
|
||||
This package contains the documentation in the HTML format of the Boost C++
|
||||
@@ -460,7 +481,7 @@
|
||||
%if 0%{?rhel} >= 6
|
||||
BuildArch: noarch
|
||||
%endif
|
||||
-Requires: boost-devel = %{version}-%{release}
|
||||
+Requires: scylla-boost-devel = %{version}-%{release}
|
||||
|
||||
%description examples
|
||||
This package contains example source files distributed with boost.
|
||||
@@ -471,9 +492,10 @@
|
||||
%package openmpi
|
||||
Summary: Run-Time component of Boost.MPI library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
Requires: openmpi%{?_isa}
|
||||
BuildRequires: openmpi-devel
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description openmpi
|
||||
|
||||
@@ -483,10 +505,11 @@
|
||||
%package openmpi-devel
|
||||
Summary: Shared library symbolic links for Boost.MPI
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-devel%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-openmpi-python%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-graph-openmpi%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-devel%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-openmpi-python%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-graph-openmpi%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description openmpi-devel
|
||||
|
||||
@@ -496,9 +519,10 @@
|
||||
%package openmpi-python
|
||||
Summary: Python run-time component of Boost.MPI library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-python%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-python%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description openmpi-python
|
||||
|
||||
@@ -508,8 +532,9 @@
|
||||
%package graph-openmpi
|
||||
Summary: Run-Time component of parallel boost graph library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-openmpi%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
|
||||
%description graph-openmpi
|
||||
|
||||
@@ -526,11 +551,11 @@
|
||||
%package mpich
|
||||
Summary: Run-Time component of Boost.MPI library
|
||||
Group: System Environment/Libraries
|
||||
+Requires: scylla-env
|
||||
Requires: mpich%{?_isa}
|
||||
BuildRequires: mpich-devel
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
-Provides: boost-mpich2 = %{version}-%{release}
|
||||
-Obsoletes: boost-mpich2 < 1.53.0-9
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-mpich2 = %{version}-%{release}
|
||||
|
||||
%description mpich
|
||||
|
||||
@@ -540,12 +565,12 @@
|
||||
%package mpich-devel
|
||||
Summary: Shared library symbolic links for Boost.MPI
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-devel%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-mpich%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-mpich-python%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-graph-mpich%{?_isa} = %{version}-%{release}
|
||||
-Provides: boost-mpich2-devel = %{version}-%{release}
|
||||
-Obsoletes: boost-mpich2-devel < 1.53.0-9
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-devel%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-mpich%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-mpich-python%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-graph-mpich%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-mpich2-devel = %{version}-%{release}
|
||||
|
||||
%description mpich-devel
|
||||
|
||||
@@ -555,11 +580,11 @@
|
||||
%package mpich-python
|
||||
Summary: Python run-time component of Boost.MPI library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-mpich%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-python%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
-Provides: boost-mpich2-python = %{version}-%{release}
|
||||
-Obsoletes: boost-mpich2-python < 1.53.0-9
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-mpich%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-python%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-mpich2-python = %{version}-%{release}
|
||||
|
||||
%description mpich-python
|
||||
|
||||
@@ -569,10 +594,10 @@
|
||||
%package graph-mpich
|
||||
Summary: Run-Time component of parallel boost graph library
|
||||
Group: System Environment/Libraries
|
||||
-Requires: boost-mpich%{?_isa} = %{version}-%{release}
|
||||
-Requires: boost-serialization%{?_isa} = %{version}-%{release}
|
||||
-Provides: boost-graph-mpich2 = %{version}-%{release}
|
||||
-Obsoletes: boost-graph-mpich2 < 1.53.0-9
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-mpich%{?_isa} = %{version}-%{release}
|
||||
+Requires: scylla-boost-serialization%{?_isa} = %{version}-%{release}
|
||||
+Provides: scylla-boost-graph-mpich2 = %{version}-%{release}
|
||||
|
||||
%description graph-mpich
|
||||
|
||||
@@ -586,7 +611,8 @@
|
||||
%package build
|
||||
Summary: Cross platform build system for C++ projects
|
||||
Group: Development/Tools
|
||||
-Requires: boost-jam
|
||||
+Requires: scylla-env
|
||||
+Requires: scylla-boost-jam
|
||||
BuildArch: noarch
|
||||
|
||||
%description build
|
||||
@@ -600,6 +626,7 @@
|
||||
%package jam
|
||||
Summary: A low-level build tool
|
||||
Group: Development/Tools
|
||||
+Requires: scylla-env
|
||||
|
||||
%description jam
|
||||
Boost.Jam (BJam) is the low-level build engine tool for Boost.Build.
|
||||
@@ -1134,7 +1161,7 @@
|
||||
%files devel
|
||||
%defattr(-, root, root, -)
|
||||
%doc LICENSE_1_0.txt
|
||||
-%{_includedir}/%{name}
|
||||
+%{_includedir}/%{orig_name}
|
||||
%{_libdir}/libboost_atomic.so
|
||||
%{_libdir}/libboost_chrono.so
|
||||
%{_libdir}/libboost_container.so
|
||||
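
The pattern above recurs in every spec in this series: Name gains a scylla- prefix while %{orig_name} preserves the upstream name, so Source0, %{toplev_dirname} and %{_includedir}/%{orig_name} keep resolving against the unmodified boost_1_57_0 tarball; every subpackage gains Requires: scylla-env; and %_prefix relocates the install tree to /opt/scylladb. A quick sanity check of the rebuilt packages might look like this (a sketch; the exact RPM file names depend on the build host and %{dist} tag):

# Sketch: confirm a renamed subpackage pulls in scylla-env and installs
# under /opt/scylladb instead of /usr (file names are illustrative).
rpm -qp --requires scylla-boost-system-1.57.0-6.el7.centos.x86_64.rpm | grep scylla-env
rpm -qpl scylla-boost-devel-1.57.0-6.el7.centos.x86_64.rpm | grep '^/opt/scylladb' | head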
dist/redhat/centos_dep/build_dependency.sh

@@ -13,15 +13,15 @@ mkdir -p build/srpms
 cd build/srpms

 if [ ! -f binutils-2.25-5.fc22.src.rpm ]; then
-    wget http://ftp.riken.jp/Linux/fedora/releases/22/Everything/source/SRPMS/b/binutils-2.25-5.fc22.src.rpm
+    wget http://download.fedoraproject.org/pub/fedora/linux/releases/22/Everything/source/SRPMS/b/binutils-2.25-5.fc22.src.rpm
 fi

 if [ ! -f isl-0.14-3.fc22.src.rpm ]; then
-    wget http://ftp.riken.jp/Linux/fedora/releases/22/Everything/source/SRPMS/i/isl-0.14-3.fc22.src.rpm
+    wget http://download.fedoraproject.org/pub/fedora/linux/releases/22/Everything/source/SRPMS/i/isl-0.14-3.fc22.src.rpm
 fi

 if [ ! -f gcc-5.1.1-4.fc22.src.rpm ]; then
-    wget http://ftp.riken.jp/Linux/fedora/updates/22/SRPMS/g/gcc-5.1.1-4.fc22.src.rpm
+    wget http://download.fedoraproject.org/pub/fedora/linux/updates/22/SRPMS/g/gcc-5.1.1-4.fc22.src.rpm
 fi

 if [ ! -f boost-1.57.0-6.fc22.src.rpm ]; then
@@ -36,10 +36,6 @@ if [ ! -f ragel-6.8-3.fc22.src.rpm ]; then
     wget http://download.fedoraproject.org/pub/fedora/linux/releases/22/Everything/source/SRPMS/r/ragel-6.8-3.fc22.src.rpm
 fi

-if [ ! -f re2c-0.13.5-9.fc22.src.rpm ]; then
-    wget http://download.fedoraproject.org/pub/fedora/linux/releases/22/Everything/source/SRPMS/r/re2c-0.13.5-9.fc22.src.rpm
-fi
-
 cd -

 sudo yum install -y epel-release
@@ -49,63 +45,76 @@ sudo ln -sf /usr/bin/python3.4 /usr/bin/python3
 sudo yum install -y python-devel libicu-devel openmpi-devel mpich-devel libstdc++-devel bzip2-devel zlib-devel
 sudo yum install -y flex bison dejagnu zlib-static glibc-static sharutils bc libstdc++-static gmp-devel texinfo texinfo-tex systemtap-sdt-devel mpfr-devel libmpc-devel elfutils-devel elfutils-libelf-devel glibc-devel.x86_64 glibc-devel.i686 gcc-gnat libgnat doxygen graphviz dblatex texlive-collection-latex docbook5-style-xsl python-sphinx cmake
+sudo yum install -y gcc-objc
+sudo yum install -y asciidoc
+sudo yum install -y gettext

-if [ ! -f $RPMBUILD/RPMS/x86_64/binutils-2.25-5.el7.centos.x86_64.rpm ]; then
-    rpmbuild --define "_topdir $RPMBUILD" --rebuild build/srpms/binutils-2.25-5.fc22.src.rpm
+if [ ! -f $RPMBUILD/RPMS/noarch/scylla-env-1.0-1.el7.centos.noarch.rpm ]; then
+    cd dist/redhat/centos_dep
+    tar cpf $RPMBUILD/SOURCES/scylla-env-1.0.tar scylla-env-1.0
+    cd -
+    rpmbuild --define "_topdir $RPMBUILD" --ba dist/redhat/centos_dep/scylla-env.spec
 fi
-do_install binutils-2.25-5.el7.centos.x86_64.rpm
+do_install scylla-env-1.0-1.el7.centos.noarch.rpm

-if [ ! -f $RPMBUILD/RPMS/x86_64/isl-0.14-3.el7.centos.x86_64.rpm ]; then
-    rpmbuild --define "_topdir $RPMBUILD" --rebuild build/srpms/isl-0.14-3.fc22.src.rpm
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-binutils-2.25-5.el7.centos.x86_64.rpm ]; then
+    rpm --define "_topdir $RPMBUILD" -ivh build/srpms/binutils-2.25-5.fc22.src.rpm
+    patch $RPMBUILD/SPECS/binutils.spec < dist/redhat/centos_dep/binutils.diff
+    rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/binutils.spec
 fi
-do_install isl-0.14-3.el7.centos.x86_64.rpm
-do_install isl-devel-0.14-3.el7.centos.x86_64.rpm
+do_install scylla-binutils-2.25-5.el7.centos.x86_64.rpm

-if [ ! -f $RPMBUILD/RPMS/x86_64/gcc-5.1.1-4.el7.centos.x86_64.rpm ]; then
-    rpmbuild --define "_topdir $RPMBUILD" --define "fedora 21" --rebuild build/srpms/gcc-5.1.1-4.fc22.src.rpm
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-isl-0.14-3.el7.centos.x86_64.rpm ]; then
+    rpm --define "_topdir $RPMBUILD" -ivh build/srpms/isl-0.14-3.fc22.src.rpm
+    patch $RPMBUILD/SPECS/isl.spec < dist/redhat/centos_dep/isl.diff
+    rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/isl.spec
 fi
-do_install *5.1.1-4*
+do_install scylla-isl-0.14-3.el7.centos.x86_64.rpm
+do_install scylla-isl-devel-0.14-3.el7.centos.x86_64.rpm

-if [ ! -f $RPMBUILD/RPMS/x86_64/boost-1.57.0-6.el7.centos.x86_64.rpm ]; then
-    rpmbuild --define "_topdir $RPMBUILD" --without python3 --rebuild build/srpms/boost-1.57.0-6.fc22.src.rpm
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-gcc-5.1.1-4.el7.centos.x86_64.rpm ]; then
+    rpm --define "_topdir $RPMBUILD" -ivh build/srpms/gcc-5.1.1-4.fc22.src.rpm
+    patch $RPMBUILD/SPECS/gcc.spec < dist/redhat/centos_dep/gcc.diff
+    rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/gcc.spec
 fi
-do_install boost*
+do_install scylla-*5.1.1-4*

-if [ ! -f $RPMBUILD/RPMS/x86_64/re2c-0.13.5-9.el7.centos.x86_64.rpm ]; then
-    rpmbuild --define "_topdir $RPMBUILD" --rebuild build/srpms/re2c-0.13.5-9.fc22.src.rpm
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-boost-1.57.0-6.el7.centos.x86_64.rpm ]; then
+    rpm --define "_topdir $RPMBUILD" -ivh build/srpms/boost-1.57.0-6.fc22.src.rpm
+    patch $RPMBUILD/SPECS/boost.spec < dist/redhat/centos_dep/boost.diff
+    rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/boost.spec
 fi
-do_install re2c-0.13.5-9.el7.centos.x86_64.rpm
+do_install scylla-boost*

-if [ ! -f $RPMBUILD/RPMS/x86_64/ninja-build-1.5.3-2.el7.centos.x86_64.rpm ]; then
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-ninja-build-1.5.3-2.el7.centos.x86_64.rpm ]; then
     rpm --define "_topdir $RPMBUILD" -ivh build/srpms/ninja-build-1.5.3-2.fc22.src.rpm
     patch $RPMBUILD/SPECS/ninja-build.spec < dist/redhat/centos_dep/ninja-build.diff
     rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/ninja-build.spec
 fi
-do_install ninja-build-1.5.3-2.el7.centos.x86_64.rpm
+do_install scylla-ninja-build-1.5.3-2.el7.centos.x86_64.rpm

-if [ ! -f $RPMBUILD/RPMS/x86_64/ragel-6.8-3.el7.centos.x86_64.rpm ]; then
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-ragel-6.8-3.el7.centos.x86_64.rpm ]; then
     rpm --define "_topdir $RPMBUILD" -ivh build/srpms/ragel-6.8-3.fc22.src.rpm
     patch $RPMBUILD/SPECS/ragel.spec < dist/redhat/centos_dep/ragel.diff
     rpmbuild --define "_topdir $RPMBUILD" -ba $RPMBUILD/SPECS/ragel.spec
 fi
-do_install ragel-6.8-3.el7.centos.x86_64.rpm
+do_install scylla-ragel-6.8-3.el7.centos.x86_64.rpm

-if [ ! -f $RPMBUILD/RPMS/noarch/antlr3-tool-3.5.2-1.el7.centos.noarch.rpm ]; then
-    mkdir build/antlr3-tool-3.5.2
-    cp dist/redhat/centos_dep/antlr3 build/antlr3-tool-3.5.2
-    cd build/antlr3-tool-3.5.2
+if [ ! -f $RPMBUILD/RPMS/noarch/scylla-antlr3-tool-3.5.2-1.el7.centos.noarch.rpm ]; then
+    mkdir build/scylla-antlr3-tool-3.5.2
+    cp dist/redhat/centos_dep/antlr3 build/scylla-antlr3-tool-3.5.2
+    cd build/scylla-antlr3-tool-3.5.2
     wget http://www.antlr3.org/download/antlr-3.5.2-complete-no-st3.jar
     cd -
     cd build
-    tar cJpf $RPMBUILD/SOURCES/antlr3-tool-3.5.2.tar.xz antlr3-tool-3.5.2
+    tar cJpf $RPMBUILD/SOURCES/scylla-antlr3-tool-3.5.2.tar.xz scylla-antlr3-tool-3.5.2
     cd -
-    rpmbuild --define "_topdir $RPMBUILD" -ba dist/redhat/centos_dep/antlr3-tool.spec
+    rpmbuild --define "_topdir $RPMBUILD" -ba dist/redhat/centos_dep/scylla-antlr3-tool.spec
 fi
-do_install antlr3-tool-3.5.2-1.el7.centos.noarch.rpm
+do_install scylla-antlr3-tool-3.5.2-1.el7.centos.noarch.rpm

-if [ ! -f $RPMBUILD/RPMS/x86_64/antlr3-C++-devel-3.5.2-1.el7.centos.x86_64.rpm ];then
+if [ ! -f $RPMBUILD/RPMS/x86_64/scylla-antlr3-C++-devel-3.5.2-1.el7.centos.x86_64.rpm ];then
     wget -O build/3.5.2.tar.gz https://github.com/antlr/antlr3/archive/3.5.2.tar.gz
     mv build/3.5.2.tar.gz $RPMBUILD/SOURCES
-    rpmbuild --define "_topdir $RPMBUILD" -ba dist/redhat/centos_dep/antlr3-C++-devel.spec
+    rpmbuild --define "_topdir $RPMBUILD" -ba dist/redhat/centos_dep/scylla-antlr3-C++-devel.spec
 fi
-do_install antlr3-C++-devel-3.5.2-1.el7.centos.x86_64.rpm
+do_install scylla-antlr3-C++-devel-3.5.2-1.el7.centos.x86_64.rpm

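
Every build block above funnels into do_install, a helper defined in an earlier, unchanged part of build_dependency.sh and therefore not visible in this hunk. Its real body is not shown here; a minimal stand-in that matches how it is called (a bare RPM file name or glob under the RPMS tree) could be:

# Hypothetical stand-in for the do_install helper used above; the actual
# definition lives earlier in build_dependency.sh and may differ.
do_install() {
    # $1 is an RPM file name (or glob) under $RPMBUILD/RPMS/<arch>/
    sudo rpm -Uvh --replacepkgs $RPMBUILD/RPMS/*/$1
}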
dist/redhat/centos_dep/gcc.diff (new file, 1316 lines; diff not shown because it is too large)
dist/redhat/centos_dep/isl.diff (new file)

@@ -0,0 +1,39 @@
--- isl.spec	2015-01-06 16:24:49.000000000 +0000
+++ isl.spec.1	2015-10-18 12:12:38.000000000 +0000
@@ -1,5 +1,5 @@
 Summary: Integer point manipulation library
-Name: isl
+Name: scylla-isl
 Version: 0.14
 License: MIT
 Group: System Environment/Libraries
@@ -17,8 +17,10 @@

 BuildRequires: gmp-devel
 BuildRequires: pkgconfig
+Requires: scylla-env

 Source0: http://isl.gforge.inria.fr/isl-%{version}.tar.xz
+%define _prefix /opt/scylladb

 %description
 isl is a library for manipulating sets and relations of integer points
@@ -32,7 +34,8 @@

 %package devel
 Summary: Development for building integer point manipulation library
-Requires: isl%{?_isa} == %{version}-%{release}
+Requires: scylla-env
+Requires: scylla-isl%{?_isa} == %{version}-%{release}
 Requires: gmp-devel%{?_isa}
 Group: Development/Libraries

@@ -47,7 +50,7 @@
 graphs), dependence analysis and bounds on piecewise step-polynomials.

 %prep
-%setup -q
+%setup -q -n isl-%{version}

 %build
 %configure
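
The %setup change is the one non-obvious consequence of the rename: with Name set to scylla-isl, a bare %setup -q would try to cd into scylla-isl-0.14/, but the upstream tarball still unpacks to isl-0.14/, hence the explicit -n isl-%{version}. The directory a source archive will create can be confirmed up front (a sketch):

# The tarball keeps the upstream top-level directory name:
tar tf isl-0.14.tar.xz | head -1    # expected: isl-0.14/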
dist/redhat/centos_dep/ninja-build.diff

@@ -1,29 +1,34 @@
---- rpmbuild/SPECS/ninja-build.spec	2015-02-08 17:53:10.000000000 -0500
-+++ rpmbuild/SPECS/ninja-build.spec.1	2015-09-17 21:21:52.343000000 -0400
-@@ -8,7 +8,7 @@
- Source1: ninja.vim
- # https://github.com/martine/ninja/pull/882
- Patch0: ninja-1.5.3-verbose-build.patch
--BuildRequires: asciidoc
-+#BuildRequires: asciidoc
- BuildRequires: gtest-devel
- BuildRequires: python2-devel
- BuildRequires: re2c >= 0.11.3
-@@ -28,7 +28,7 @@
- %build
- CFLAGS="%{optflags}" LDFLAGS="%{?__global_ldflags}" \
-   %{__python2} configure.py --bootstrap --verbose
--./ninja -v manual
-+#./ninja -v manual
- ./ninja -v ninja_test
-
- %install
-@@ -48,7 +48,7 @@
- ./ninja_test
-
- %files
--%doc COPYING HACKING.md README doc/manual.html
-+%doc COPYING HACKING.md README
- %{_bindir}/ninja-build
- %{_datadir}/bash-completion/completions/ninja-bash-completion
- %{_datadir}/emacs/site-lisp/ninja-mode.el
+1c1
+< Name: ninja-build
+---
+> Name: scylla-ninja-build
+8d7
+< Source1: ninja.vim
+10a10
+> Requires: scylla-env
+14,16c14,15
+< BuildRequires: re2c >= 0.11.3
+< Requires: emacs-filesystem
+< Requires: vim-filesystem
+---
+> #BuildRequires: scylla-re2c >= 0.11.3
+> %define _prefix /opt/scylladb
+35,37c34
+< # TODO: Install ninja_syntax.py?
+< mkdir -p %{buildroot}/{%{_bindir},%{_datadir}/bash-completion/completions,%{_datadir}/emacs/site-lisp,%{_datadir}/vim/vimfiles/syntax,%{_datadir}/vim/vimfiles/ftdetect,%{_datadir}/zsh/site-functions}
+<
+---
+> mkdir -p %{buildroot}/opt/scylladb/bin
+39,43d35
+< install -pm644 misc/bash-completion %{buildroot}%{_datadir}/bash-completion/completions/ninja-bash-completion
+< install -pm644 misc/ninja-mode.el %{buildroot}%{_datadir}/emacs/site-lisp/ninja-mode.el
+< install -pm644 misc/ninja.vim %{buildroot}%{_datadir}/vim/vimfiles/syntax/ninja.vim
+< install -pm644 %{SOURCE1} %{buildroot}%{_datadir}/vim/vimfiles/ftdetect/ninja.vim
+< install -pm644 misc/zsh-completion %{buildroot}%{_datadir}/zsh/site-functions/_ninja
+53,58d44
+< %{_datadir}/bash-completion/completions/ninja-bash-completion
+< %{_datadir}/emacs/site-lisp/ninja-mode.el
+< %{_datadir}/vim/vimfiles/syntax/ninja.vim
+< %{_datadir}/vim/vimfiles/ftdetect/ninja.vim
+< # zsh does not have a -filesystem package
+< %{_datadir}/zsh/
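
Note that the replacement ninja-build.diff switches from unified to plain diff(1) "normal" format: 1c1 changes old line 1 into new line 1, 8d7 deletes old line 8, 10a10 appends after old line 10, and 14,16c14,15 rewrites a range. patch(1) autodetects both formats, so the invocation in build_dependency.sh stays the same:

# As run by build_dependency.sh; patch(1) handles normal and unified diffs alike.
patch $RPMBUILD/SPECS/ninja-build.spec < dist/redhat/centos_dep/ninja-build.diff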
dist/redhat/centos_dep/ragel.diff

@@ -1,14 +1,44 @@
---- rpmbuild/SPECS/ragel.spec	2014-08-18 07:55:49.000000000 -0400
-+++ rpmbuild/SPECS/ragel.spec.1	2015-09-17 22:06:27.623000000 -0400
-@@ -49,8 +49,10 @@
+--- ragel.spec	2014-08-18 11:55:49.000000000 +0000
++++ ragel.spec.1	2015-10-18 12:18:23.000000000 +0000
+@@ -1,17 +1,20 @@
+-Name: ragel
++Name: scylla-ragel
++%define orig_name ragel
+ Version: 6.8
+ Release: 3%{?dist}
+ Summary: Finite state machine compiler

- %files
- %defattr(-,root,root,-)
--%doc COPYING ragel.vim CREDITS ChangeLog
-+%doc COPYING ragel.vim
- %doc doc/ragel-guide.pdf
-+%{_docdir}/%{name}/CREDITS
-+%{_docdir}/%{name}/ChangeLog
- %{_bindir}/ragel
- %{_mandir}/*/*
+ Group: Development/Tools
+ License: GPLv2+
+-URL: http://www.complang.org/%{name}/
+-Source0: http://www.complang.org/%{name}/%{name}-%{version}.tar.gz
++URL: http://www.complang.org/%{orig_name}/
++Source0: http://www.complang.org/%{orig_name}/%{orig_name}-%{version}.tar.gz
+ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+
+ # for documentation building
+ BuildRequires: gcc-objc, autoconf, gcc-c++
++Requires: scylla-env
+ Requires: gawk
++%define _prefix /opt/scylladb
+
+ %description
+ Ragel compiles finite state machines from regular languages into executable C,
+@@ -21,7 +24,7 @@
+ done using inline operators that do not disrupt the regular language syntax.
+
+ %prep
+-%setup -q
++%setup -q -n %{orig_name}-%{version}
+
+ # Pass fedora cflags correctly
+ sed -i.flags \
+@@ -34,7 +37,7 @@
+
+ %build
+ # set the names of the other programming commandline programs
+-%configure --docdir=%{_docdir}/%{name} RUBY=ruby JAVAC=javac GMCS=gmcs
++%configure --docdir=%{_docdir}/%{name}-%{version} RUBY=ruby JAVAC=javac GMCS=gmcs
+
+ make %{?_smp_mflags}

+
dist/redhat/centos_dep/antlr3-C++-devel.spec → dist/redhat/centos_dep/scylla-antlr3-C++-devel.spec

@@ -1,6 +1,6 @@
 %global antlr_version 3.5.2

-Name: antlr3-C++-devel
+Name: scylla-antlr3-C++-devel
 Version: %{antlr_version}
 Release: 1%{?dist}
 Summary: C++ runtime support for ANTLR-generated parsers
@@ -8,6 +8,8 @@ Summary: C++ runtime support for ANTLR-generated parsers
 License: BSD
 URL: http://www.antlr3.org/
 Source0: https://github.com/antlr/antlr3/archive/%{antlr_version}.tar.gz
+Requires: scylla-env
+%define _prefix /opt/scylladb

 %description
 C++ runtime support for ANTLR-generated parsers.
dist/redhat/centos_dep/antlr3-tool.spec → dist/redhat/centos_dep/scylla-antlr3-tool.spec

@@ -1,6 +1,6 @@
 %global antlr_version 3.5.2

-Name: antlr3-tool
+Name: scylla-antlr3-tool
 Version: %{antlr_version}
 Release: 1%{?dist}
 Summary: ANother Tool for Language Recognition
@@ -11,6 +11,8 @@ Source0: %{name}-%{version}.tar.xz

 BuildArch: noarch
 Requires: java-1.7.0-openjdk
+Requires: scylla-env
+%define _prefix /opt/scylladb

 %description
 ANother Tool for Language Recognition, is a language tool
dist/redhat/centos_dep/scylla-env-1.0/ld.so.conf.d/scylla.x86_64.conf (new file)

@@ -0,0 +1 @@
/opt/scylladb/lib64
dist/redhat/centos_dep/scylla-env-1.0/profile.d/scylla.csh (new file)

@@ -0,0 +1,2 @@
set path = ($PATH /opt/scylladb/bin /opt/scylladb/sbin)

dist/redhat/centos_dep/scylla-env-1.0/profile.d/scylla.sh (new file)

@@ -0,0 +1 @@
export PATH=$PATH:/opt/scylladb/bin:/opt/scylladb/sbin
dist/redhat/centos_dep/scylla-env.spec (new file)

@@ -0,0 +1,40 @@
Name: scylla-env
Version: 1.0
Release: 1%{?dist}
Summary: Scylla is a highly scalable, eventually consistent, distributed, partitioned row DB.

Group: Applications/Databases
License: AGPLv3
URL: http://www.scylladb.com/
Source0: scylla-env-1.0.tar
BuildArch: noarch

%description


%prep
%setup -q


%build


%install
rm -rf $RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/profile.d
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/ld.so.conf.d
install -m 644 profile.d/* $RPM_BUILD_ROOT%{_sysconfdir}/profile.d
install -m 644 ld.so.conf.d/* $RPM_BUILD_ROOT%{_sysconfdir}/ld.so.conf.d

%post
%{_sbindir}/ldconfig

%files
%doc
%{_sysconfdir}/profile.d/scylla.sh
%{_sysconfdir}/profile.d/scylla.csh
%{_sysconfdir}/ld.so.conf.d/scylla.x86_64.conf


%changelog

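
Everything scylla-env ships is plumbing for the /opt/scylladb prefix used by the other scylla-* packages: the profile.d snippets put /opt/scylladb/bin and /opt/scylladb/sbin on PATH for login shells, and the ld.so.conf.d entry plus the ldconfig call in %post make /opt/scylladb/lib64 visible to the dynamic linker. Its effect can be checked roughly like this (a sketch; run after installing scylla-env and the toolchain packages):

# Non-login shells have to source the profile snippet by hand.
. /etc/profile.d/scylla.sh
echo $PATH | grep /opt/scylladb/bin        # prefix directories appended to PATH
ldconfig -p | grep /opt/scylladb/lib64     # scylla-prefix libraries known to the linker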
dist/redhat/scylla-server.spec.in

@@ -8,9 +8,9 @@ License: AGPLv3
 URL: http://www.scylladb.com/
 Source0: %{name}-@@VERSION@@-@@RELEASE@@.tar

-BuildRequires: libaio-devel boost-devel libstdc++-devel cryptopp-devel hwloc-devel numactl-devel libpciaccess-devel libxml2-devel zlib-devel thrift-devel yaml-cpp-devel lz4-devel snappy-devel jsoncpp-devel systemd-devel xz-devel openssl-devel libcap-devel libselinux-devel libgcrypt-devel libgpg-error-devel elfutils-devel krb5-devel libcom_err-devel libattr-devel pcre-devel elfutils-libelf-devel bzip2-devel keyutils-libs-devel ninja-build ragel antlr3-tool antlr3-C++-devel xfsprogs-devel make
-%{?fedora:BuildRequires: python3 gcc-c++ libasan libubsan}
-%{?rhel:BuildRequires: python34 gcc-c++ >= 5.1.1}
+BuildRequires: libaio-devel boost-devel libstdc++-devel cryptopp-devel hwloc-devel numactl-devel libpciaccess-devel libxml2-devel zlib-devel thrift-devel yaml-cpp-devel lz4-devel snappy-devel jsoncpp-devel systemd-devel xz-devel openssl-devel libcap-devel libselinux-devel libgcrypt-devel libgpg-error-devel elfutils-devel krb5-devel libcom_err-devel libattr-devel pcre-devel elfutils-libelf-devel bzip2-devel keyutils-libs-devel xfsprogs-devel make
+%{?fedora:BuildRequires: ninja-build ragel antlr3-tool antlr3-C++-devel python3 gcc-c++ libasan libubsan}
+%{?rhel:BuildRequires: scylla-ninja-build scylla-ragel scylla-antlr3-tool scylla-antlr3-C++-devel python34 scylla-gcc-c++ >= 5.1.1}
 Requires: systemd-libs xfsprogs

 %description
@@ -23,7 +23,7 @@ Requires: systemd-libs xfsprogs
 ./configure.py --with scylla --disable-xen --enable-dpdk --mode=release
 %endif
 %if 0%{?rhel}
-./configure.py --with scylla --disable-xen --enable-dpdk --mode=release --static-stdc++
+./configure.py --with scylla --disable-xen --enable-dpdk --mode=release --static-stdc++ --compiler=/opt/scylladb/bin/g++
 %endif
 ninja-build -j2

@@ -39,7 +39,7 @@ mkdir -p $RPM_BUILD_ROOT%{_prefix}/lib/scylla/
 install -m644 dist/redhat/sysconfig/scylla-server $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/
 install -m644 dist/redhat/limits.d/scylla.conf $RPM_BUILD_ROOT%{_sysconfdir}/security/limits.d/
 install -m644 dist/redhat/systemd/scylla-server.service $RPM_BUILD_ROOT%{_unitdir}/
-install -m755 dist/redhat/scripts/* $RPM_BUILD_ROOT%{_prefix}/lib/scylla/
+install -m755 dist/common/scripts/* $RPM_BUILD_ROOT%{_prefix}/lib/scylla/
 install -m755 seastar/scripts/posix_net_conf.sh $RPM_BUILD_ROOT%{_prefix}/lib/scylla/
 install -m755 seastar/dpdk/tools/dpdk_nic_bind.py $RPM_BUILD_ROOT%{_prefix}/lib/scylla/
 install -m755 build/release/scylla $RPM_BUILD_ROOT%{_bindir}
@@ -55,6 +55,7 @@ install -d -m755 $RPM_BUILD_ROOT%{_sharedstatedir}/scylla/data
 install -d -m755 $RPM_BUILD_ROOT%{_sharedstatedir}/scylla/commitlog
 install -d -m755 $RPM_BUILD_ROOT%{_sharedstatedir}/scylla/conf
 install -m644 conf/scylla.yaml $RPM_BUILD_ROOT%{_sharedstatedir}/scylla/conf/
+install -m644 conf/cassandra-rackdc.properties $RPM_BUILD_ROOT%{_sharedstatedir}/scylla/conf/

 %pre
 /usr/sbin/groupadd scylla 2> /dev/null || :
@@ -101,6 +102,7 @@ rm -rf $RPM_BUILD_ROOT
 %attr(0755,scylla,scylla) %dir %{_sharedstatedir}/scylla/commitlog
 %attr(0755,root,root) %dir %{_sharedstatedir}/scylla/conf/
 %{_sharedstatedir}/scylla/conf/scylla.yaml
+%{_sharedstatedir}/scylla/conf/cassandra-rackdc.properties

 %changelog
 * Tue Jul 21 2015 Takuya ASADA <syuu@cloudius-systems.com>
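
On RHEL/CentOS the net effect is that scylla-server now builds against the bootstrapped toolchain: the scylla-* BuildRequires pull in the /opt/scylladb packages produced by build_dependency.sh, and configure.py is pointed at the relocated compiler explicitly (appending /opt/scylladb/bin to PATH alone would not win over /usr/bin/g++). Outside rpmbuild, the equivalent manual build would look roughly like this (a sketch, assuming build_dependency.sh has already populated /opt/scylladb):

# Manual CentOS build mirroring the %build section above (illustrative).
./configure.py --with scylla --disable-xen --enable-dpdk --mode=release \
    --static-stdc++ --compiler=/opt/scylladb/bin/g++
ninja-build -j2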
dist/ubuntu/build_deb.sh (new executable file)

@@ -0,0 +1,27 @@
#!/bin/sh -e

if [ ! -e dist/ubuntu/build_deb.sh ]; then
    echo "run build_deb.sh in top of scylla dir"
    exit 1
fi

sudo apt-get -y update

./dist/ubuntu/dep/build_dependency.sh

sudo apt-get -y install libyaml-cpp-dev liblz4-dev libsnappy-dev libcrypto++-dev libboost1.55-dev libjsoncpp-dev libaio-dev ragel ninja-build git libyaml-cpp0.5 liblz4-1 libsnappy1 libcrypto++9 libboost-program-options1.55.0 libboost-program-options1.55-dev libboost-system1.55.0 libboost-system1.55-dev libboost-thread1.55.0 libboost-thread1.55-dev libboost-test1.55.0 libboost-test1.55-dev libjsoncpp0 libaio1 hugepages software-properties-common libboost-filesystem1.55-dev libboost-filesystem1.55.0
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get -y update
sudo apt-get -y install g++-4.9

VERSION=$(./SCYLLA-VERSION-GEN)
SCYLLA_VERSION=$(cat build/SCYLLA-VERSION-FILE)
SCYLLA_RELEASE=$(cat build/SCYLLA-RELEASE-FILE)
if [ "$SCYLLA_VERSION" = "development" ]; then
    SCYLLA_VERSION=0development
fi
cp dist/ubuntu/changelog.in debian/changelog
sed -i -e "s/@@VERSION@@/$SCYLLA_VERSION/g" debian/changelog
sed -i -e "s/@@RELEASE@@/$SCYLLA_RELEASE/g" debian/changelog

debuild -r fakeroot --no-tgz-check -us -uc
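
build_deb.sh guards against being run from the wrong directory, installs the Trusty build dependencies, stamps debian/changelog from the template below, and hands off to debuild. Typical use (a sketch; debuild writes its artifacts to the parent directory):

# From the top of a scylla checkout on Ubuntu 14.04:
./dist/ubuntu/build_deb.sh
ls ../scylla-server_*.deb     # the freshly built package lands one level up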
dist/ubuntu/changelog.in (new file)

@@ -0,0 +1,5 @@
scylla-server (@@VERSION@@-@@RELEASE@@-ubuntu1) trusty; urgency=medium

  * Initial release.

 -- Takuya ASADA <syuu@scylladb.com>  Mon, 24 Aug 2015 09:22:55 +0000
dist/ubuntu/dep/antlr3-c++-dev-3.5.2/debian/changelog (new file)

@@ -0,0 +1,5 @@
antlr3-c++-dev (3.5.2-ubuntu1) trusty; urgency=medium

  * Initial release.

 -- Takuya ASADA <syuu@scylladb.com>  Mon, 24 Aug 2015 09:22:55 +0000
dist/ubuntu/dep/antlr3-c++-dev-3.5.2/debian/compat (new file)

@@ -0,0 +1 @@
9
dist/ubuntu/dep/antlr3-c++-dev-3.5.2/debian/control (new file)

@@ -0,0 +1,12 @@
Source: antlr3-c++-dev
Maintainer: Takuya ASADA <syuu@scylladb.com>
Section: misc
Priority: optional
Standards-Version: 3.5.2
Build-Depends: debhelper (>= 9)

Package: antlr3-c++-dev
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}
Description: language tool for constructing recognizers, compilers etc
 A language tool that provides a framework for constructing recognizers, interpreters, compilers, and translators from grammatical descriptions containing actions in a variety of target languages.
dist/ubuntu/dep/antlr3-c++-dev-3.5.2/debian/copyright (new file)

@@ -0,0 +1,12 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: ANTLR
Upstream-Contact: http://www.antlr.org/
Source: https://github.com/antlr/antlr3

Files: *
Copyright: Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
License: BSD-3-clause

Files: debian/*
Copyright: Copyright (c) 2015 ScyllaDB
License: AGPL-3.0
dist/ubuntu/dep/antlr3-c++-dev-3.5.2/debian/rules (new executable file)

@@ -0,0 +1,8 @@
#!/usr/bin/make -f

override_dh_auto_install:
	mkdir -p $(CURDIR)/debian/antlr3-c++-dev/usr/include
	cp $(CURDIR)/runtime/Cpp/include/* \
		$(CURDIR)/debian/antlr3-c++-dev/usr/include
%:
	dh $@
dist/ubuntu/dep/antlr3-tool-3.5.2/antlr3 (new executable file)

@@ -0,0 +1,3 @@
#!/bin/sh

exec /usr/bin/java -jar /usr/share/java/antlr-3.5.2-complete-no-st3.jar $*
dist/ubuntu/dep/antlr3-tool-3.5.2/debian/changelog (new file)

@@ -0,0 +1,5 @@
antlr3-tool (3.5.2-ubuntu1) trusty; urgency=medium

  * Initial release.

 -- Takuya ASADA <syuu@scylladb.com>  Mon, 24 Aug 2015 09:22:55 +0000
Some files were not shown because too many files have changed in this diff.